import torch
import torch.nn as nn

# Tiny vocabulary: index -> character, plus the inverse character -> index map.
idx2char = ['e', 'h', 'l', 'o']
char2idx = {}
for position, character in enumerate(idx2char):
    char2idx[character] = position  # {'e': 0, 'h': 1, 'l': 2, 'o': 3}


class CharRNN(nn.Module):
    """Character-level language model: embedding -> vanilla RNN -> linear head.

    Emits one vocabulary-sized logit vector per input time step.
    """

    def __init__(self, vocab_size, embed_dim=10, hidden_size=16, num_layers=1):
        super().__init__()
        # Attribute names (emb / rnn / fc) are part of the state_dict keys,
        # so they must not change.
        self.emb = nn.Embedding(vocab_size, embed_dim)
        self.rnn = nn.RNN(input_size=embed_dim, hidden_size=hidden_size,
                          num_layers=num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, vocab_size)

    def forward(self, x):
        """Map token indices of shape (B, T) to logits of shape (B, T, V)."""
        embedded = self.emb(x)               # (B, T, E)
        states, _final = self.rnn(embedded)  # (B, T, H); final hidden unused
        return self.fc(states)               # (B, T, V)


# Rebuild the model with the same hyperparameters, then restore the trained
# weights from the saved state dict.
loaded = CharRNN(vocab_size=len(idx2char))

save_path = "../models/char_rnn_state_dict.pth"
# map_location="cpu" lets a checkpoint saved on a GPU machine load on a
# CPU-only box; weights_only=True (torch >= 1.13) refuses to unpickle
# arbitrary objects from the file, which is the safe default for state dicts.
loaded.load_state_dict(
    torch.load(save_path, map_location="cpu", weights_only=True)
)
loaded.eval()  # inference mode: disables dropout/batch-norm training behavior
print("已从磁盘加载模型参数。")


@torch.no_grad()
def predict_next_char(model, prefix_idxs):
    """
    Predict the next character class (argmax) from a prefix of indices.

    prefix_idxs: List[int], e.g. [1, 0, 2, 2] for the characters of "hell".
    Returns the predicted vocabulary index as a plain int.
    """
    batch = torch.LongTensor([prefix_idxs])  # shape (1, T): a single sequence
    print("x:")
    print(batch)
    scores = model(batch)  # shape (1, T, V)
    print("logits:")
    print(scores)
    last_step = scores[0, -1]  # logits of the final time step, shape (V,)
    choice = int(torch.argmax(last_step).item())
    print("next_logits:", last_step)
    print("next_idx:", choice)
    return choice


@torch.no_grad()
def generate(model, start_idxs, out_len=5):
    """
    Autoregressively extend start_idxs by out_len predicted characters.

    Returns the full index sequence, including the starting prefix.
    """
    produced = list(start_idxs)
    for _step in range(out_len):
        # Feed everything generated so far back in as the new prefix.
        produced.append(predict_next_char(model, produced))
    return produced


# Demo: seed with "hell" and predict the next character.
start = [1, 0, 2, 2]  # indices of "h", "e", "l", "l"
full_sequence = generate(loaded, start_idxs=start, out_len=1)
decoded = ''.join(map(idx2char.__getitem__, full_sequence))
print("生成索引序列：", full_sequence)
print("生成文本：", decoded)
