import torch
import numpy as np
import torch.nn.functional as F
from Datasets import LyricsDataset
# Context length the model/dataset were built with; also caps the window
# fed to the model during generation.
seq_len = 48

dataset = LyricsDataset(seq_len=seq_len)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# map_location ensures a checkpoint saved on GPU still loads on a CPU-only
# machine. weights_only=False is required because the file stores the whole
# pickled model object — NOTE: only safe for checkpoints from a trusted source.
model = torch.load("lyrics.pt", map_location=device, weights_only=False).to(device)


def generate_transformer(start_with, max_length=100, temperature=1.0, top_k=None, device=device):
    """Autoregressively generate lyrics seeded with *start_with*.

    Args:
        start_with: seed text; each character is looked up in the dataset
            vocabulary (unknown characters map to ``<UNK>``).
        max_length: maximum number of tokens to generate after the seed.
        temperature: softmax temperature; higher values flatten the
            distribution (more random), lower values sharpen it.
        top_k: if given, sample from the ``k`` most likely tokens instead of
            greedy argmax decoding.
        device: torch device the model lives on.

    Returns:
        The generated text (seed included, ``<SOS>``/``<EOS>`` stripped),
        joined without separators since tokens are character-level.
    """
    model.eval()
    with torch.no_grad():
        # Character-level tokenization of the seed, prefixed with <SOS>.
        unk_id = dataset.word2index["<UNK>"]
        tokens = [dataset.word2index['<SOS>']]
        tokens += [dataset.word2index.get(ch, unk_id) for ch in start_with.strip()]
        input_ids = torch.tensor([tokens], dtype=torch.long, device=device)
        eos_id = dataset.word2index.get("<EOS>", -1)

        for _ in range(max_length):
            # Feed only the last `seq_len` tokens: the model was trained with
            # a fixed context of `seq_len`, so a longer input could index past
            # its positional embeddings (assumes fixed positional embeddings
            # — confirm against the model definition).
            outputs = model(input_ids[:, -seq_len:])  # (batch, seq_len, vocab)
            logits = outputs[0, -1, :]  # last timestep
            logits = logits / max(temperature, 1e-8)  # guard temperature=0

            if top_k is not None:
                # Top-k sampling; clamp k so torch.topk never exceeds the
                # vocabulary size (topk raises on k > dim size).
                k = min(top_k, logits.size(-1))
                values, indices = torch.topk(logits, k)
                probs = F.softmax(values, dim=-1)
                chosen = torch.multinomial(probs, 1).item()
                next_token = indices[chosen].item()
            else:
                # Greedy decoding.
                next_token = torch.argmax(logits).item()

            input_ids = torch.cat(
                [input_ids, torch.tensor([[next_token]], device=device)], dim=1
            )

            if next_token == eos_id:
                break

        generated = input_ids.squeeze(0).tolist()
        words = [dataset.index2word.get(i, "<UNK>") for i in generated]
        # Strip the leading <SOS> and everything from <EOS> onward.
        if words and words[0] == '<SOS>':
            words = words[1:]
        if '<EOS>' in words:
            words = words[:words.index('<EOS>')]

        # Character-level tokens, so join with no separator.
        return ''.join(words)

# Simple REPL: read a seed line, print a generated lyric; enter 'q' to quit.
while (seed := input()) != 'q':
    print(generate_transformer(seed))