import os

import torch
import torch.nn as nn
import torch.optim as optim

# Character vocabulary: index -> character, plus the inverse lookup table.
idx2char = ['e', 'h', 'l', 'o']
char2idx = dict(zip(idx2char, range(len(idx2char))))  # {'e': 0, 'h': 1, 'l': 2, 'o': 3}

# Each input row is a 4-character sequence of token ids; the matching target
# row is the next character the model should predict at every time step.
x_data = [
    [1, 0, 2, 2],  # "hell"
    [0, 1, 2, 3],  # "ehlo"
    [2, 2, 3, 0],  # "lloe"
    [3, 1, 0, 2],  # "ohel"
]
y_data = [
    [0, 2, 2, 3],  # "ello"
    [1, 2, 3, 0],  # "hloe"
    [2, 3, 0, 1],  # "loeh"
    [1, 0, 2, 2],  # "hell"
]

inputs = torch.tensor(x_data, dtype=torch.long)  # (B, T)
labels = torch.tensor(y_data, dtype=torch.long)  # (B, T)


class CharRNN(nn.Module):
    """Embedding -> vanilla RNN -> linear head producing per-step char logits."""

    def __init__(self, vocab_size, embed_dim=10, hidden_size=16, num_layers=1):
        super().__init__()
        self.emb = nn.Embedding(vocab_size, embed_dim)
        self.rnn = nn.RNN(
            input_size=embed_dim,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,
        )
        self.fc = nn.Linear(hidden_size, vocab_size)

    def forward(self, x):
        """Map token ids of shape (B, T) to unnormalized scores (B, T, V)."""
        embedded = self.emb(x)                 # (B, T, E)
        rnn_out, _hidden = self.rnn(embedded)  # (B, T, H); final state unused
        return self.fc(rnn_out)                # (B, T, V)


# Training configuration: model, loss, and optimizer.
vocab_size = len(idx2char)

model = CharRNN(vocab_size=vocab_size)
model.train()  # this script only trains; no eval phase before saving

criterion = nn.CrossEntropyLoss()  # expects (N, V) logits vs (N,) class ids
optimizer = optim.Adam(model.parameters(), lr=0.05)

epochs = 200

# Training loop: full-batch gradient descent on the character sequences.
for epoch in range(1, epochs + 1):
    optimizer.zero_grad()
    logits = model(inputs)  # (B, T, V)
    # CrossEntropyLoss expects (N, V) scores vs (N,) targets, so flatten the
    # batch and time dimensions together.
    loss = criterion(logits.reshape(-1, vocab_size), labels.reshape(-1))
    loss.backward()
    optimizer.step()

    # Periodic progress report with greedy (argmax) decoding.
    if epoch % 50 == 0:
        with torch.no_grad():
            print("logits.shape:", logits.shape)
            print("logits:", logits)
            pred = logits.argmax(dim=-1)  # (B, T)
            print("pred.shape:", pred.shape)
            print("pred:", pred)
            for b in range(inputs.size(0)):  # decode each sample in the batch
                pred_str = ''.join(idx2char[i] for i in pred[b].tolist())
                print(f"Sample {b + 1} pred: {pred_str}")
        print(f"[epoch {epoch:03d}] loss={loss.item():.4f}")

save_path = "../models/char_rnn_batch_state_dict.pth"
# torch.save does not create missing directories and would raise
# FileNotFoundError if ../models does not exist yet — create it first.
os.makedirs(os.path.dirname(save_path), exist_ok=True)
torch.save(model.state_dict(), save_path)
print(f"模型参数已保存到: {save_path}")
