import torch
import torch.nn as nn

# Vocabulary: index -> character, plus the inverse character -> index map.
idx2char = list("ehlo")
char2idx = {c: i for i, c in enumerate(idx2char)}  # {'e': 0, 'h': 1, 'l': 2, 'o': 3}


class CharRNN(nn.Module):
    """Character-level language model: embedding -> vanilla RNN -> linear head.

    Maps a batch of token-index sequences (B, T) to per-step vocabulary
    logits (B, T, V).  Submodule names (``emb``/``rnn``/``fc``) are part of
    the checkpoint contract — do not rename them.
    """

    def __init__(self, vocab_size, embed_dim=10, hidden_size=16, num_layers=1):
        super().__init__()
        self.emb = nn.Embedding(vocab_size, embed_dim)
        # batch_first=True keeps tensors in (B, T, feature) layout throughout.
        self.rnn = nn.RNN(
            input_size=embed_dim,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,
        )
        self.fc = nn.Linear(hidden_size, vocab_size)

    def forward(self, x):
        """Return logits of shape (B, T, V) for input indices of shape (B, T)."""
        embedded = self.emb(x)                 # (B, T, E)
        rnn_out, _hidden = self.rnn(embedded)  # (B, T, H); final state unused
        return self.fc(rnn_out)               # (B, T, V)


vocab_size = len(idx2char)
model = CharRNN(vocab_size=vocab_size)

save_path = "../models/char_rnn_batch_state_dict.pth"
# weights_only=True restricts unpickling to tensors/primitives, closing the
# arbitrary-code-execution hole of loading a full pickle; map_location="cpu"
# lets a GPU-saved checkpoint load on a CPU-only machine.
state_dict = torch.load(save_path, map_location="cpu", weights_only=True)
model.load_state_dict(state_dict)
print(f"✅ 已从 {save_path} 加载模型参数。")

# Inference mode: disables dropout/batch-norm training behavior (a no-op for
# this architecture, but correct practice before evaluation).
model.eval()

# Fixed evaluation batch: four 4-step index sequences over the 'ehlo' vocab.
test_x = [
    [1, 0, 2, 2],  # "hell"
    [0, 1, 2, 3],  # "ehlo"
    [2, 2, 3, 0],  # "lloe"
    [3, 1, 0, 2],  # "ohel"
]

inputs = torch.tensor(test_x, dtype=torch.long)  # shape (B, T)
print("测试输入维度:", inputs.shape)

# ===============================
# Batch forward inference
# ===============================
with torch.no_grad():
    logits = model(inputs)         # (B, T, V)
    preds = logits.argmax(dim=-1)  # (B, T): most likely char index per step
    print("\n预测索引矩阵:")
    print(preds)

    # Decode each row of predicted indices back into a string.
    print("\n预测结果：")
    for sample_no, row in enumerate(preds.tolist(), start=1):
        decoded = ''.join(idx2char[i] for i in row)
        print(f"样本 {sample_no}: {decoded}")


# ===============================
# Step-by-step single-sample generation
# ===============================
@torch.no_grad()
def predict_next_char(model, prefix_idxs):
    """Greedily predict the index of the character following *prefix_idxs*.

    Runs the model over the whole prefix and takes the argmax of the logits
    at the final time step.
    """
    batch = torch.LongTensor([prefix_idxs])  # (1, T)
    out = model(batch)                       # (1, T, V)
    final_step_logits = out[0, -1]           # logits for the last position
    return int(final_step_logits.argmax().item())


@torch.no_grad()
def generate(model, start_idxs, out_len=3):
    """Extend *start_idxs* by greedily predicting *out_len* more characters.

    Returns a new list: the starting indices followed by the generated ones.
    Note: each step re-runs the model over the full sequence so far.
    """
    result = list(start_idxs)
    target_len = len(result) + out_len
    while len(result) < target_len:
        result.append(predict_next_char(model, result))
    return result


# Greedy-extend the first test sample ("hell") by three characters.
start = [1, 0, 2, 2]  # "hell"
gen_idxs = generate(model, start_idxs=start, out_len=3)
gen_text = ''.join([idx2char[i] for i in gen_idxs])

print("\n生成索引序列:", gen_idxs)
print("生成文本:", gen_text)
