import torch
import torch.nn as nn
import torch.optim as optim

# Hyperparameters for the character-level RNN demo.
batch_size = 1
seq_len = 5
input_size = 4       # vocabulary size (distinct characters)
embedding_size = 10  # dimension of each character embedding
hidden_size = 8      # RNN hidden-state dimension
num_layers = 2       # stacked RNN layers
num_class = 4        # output classes = vocabulary size

# Vocabulary and the toy task: map "hello" to "ohlol".
idx2char = ['e', 'h', 'l', 'o']
x_data = [[1, 0, 2, 2, 3]]   # one batch: "hello" encoded as indices
y_data = [3, 1, 2, 3, 2]     # per-step targets: "ohlol"
inputs = torch.LongTensor(x_data)
labels = torch.LongTensor(y_data)


class Model(nn.Module):
    """Character-level RNN classifier.

    Embeds integer token ids, runs them through a multi-layer RNN, and maps
    every time step's hidden state to class logits.
    """

    def __init__(self, vocab_size=4, embed_dim=10, hidden_dim=8,
                 rnn_layers=2, n_class=4):
        """Build the embedding -> RNN -> linear stack.

        Defaults mirror the module-level constants (input_size,
        embedding_size, hidden_size, num_layers, num_class), so ``Model()``
        behaves exactly as before while the class is now reusable with other
        sizes.

        Args:
            vocab_size: size of the embedding dictionary (vocabulary size).
            embed_dim: dimension each token id is mapped to.
            hidden_dim: RNN hidden-state dimension.
            rnn_layers: number of stacked RNN layers.
            n_class: number of output classes.
        """
        super(Model, self).__init__()
        self.num_class = n_class
        self.emb = nn.Embedding(vocab_size, embed_dim)
        # batch_first=True: input/output tensors are (batch, seq, feature).
        self.rnn = nn.RNN(
            input_size=embed_dim,
            hidden_size=hidden_dim,
            num_layers=rnn_layers,
            batch_first=True
        )
        self.fc = nn.Linear(hidden_dim, n_class)

    def forward(self, x):
        """Return per-time-step class logits.

        Args:
            x: LongTensor of token ids, shape (batch, seq).

        Returns:
            Tensor of logits, shape (batch * seq, n_class) — flattened so it
            can be fed directly to CrossEntropyLoss against a 1-D target.
        """
        x = self.emb(x)        # (batch, seq) -> (batch, seq, embed_dim)
        x, _ = self.rnn(x)     # -> (batch, seq, hidden_dim); final state dropped
        x = self.fc(x)         # -> (batch, seq, n_class)
        return x.view(-1, self.num_class)


net = Model()
# CrossEntropyLoss expects (N, C) raw logits and (N,) class-index targets,
# which matches the (seq_len, num_class) output of net(inputs) and labels.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.05)

num_epochs = 15
for epoch in range(num_epochs):
    optimizer.zero_grad()
    outputs = net(inputs)          # (seq_len, num_class) logits
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()

    # Greedy decode: pick the most likely class at each time step.
    # (The stray `break` that ended training after one epoch is removed,
    # so the loop now actually runs all num_epochs iterations.)
    _, idx = outputs.max(dim=1)
    predicted = ''.join(idx2char[i] for i in idx.tolist())
    print(f'Predicted: {predicted}, '
          f'Epoch [{epoch + 1}/{num_epochs}] loss = {loss.item():.3f}')
