import torch
import torch.nn as nn
import torch.optim as optim

batch_size = 1
seq_len = 5
input_size = 4   # one-hot width == vocabulary size
hidden_size = 4  # hidden size doubles as the number of output classes
num_layers = 1

# Vocabulary and its one-hot encoding table (row i is the one-hot vector for
# character index2char[i]).
index2char = ['e', 'h', 'l', 'o']
one_hot_lookup = [[int(row == col) for col in range(4)] for row in range(4)]

x_data = [1, 0, 2, 2, 3]  # hello
y_data = [3, 1, 2, 3, 2]  # ohlol
x_one_hot = [one_hot_lookup[idx] for idx in x_data]

# Reshape to (batch_size, seq_len, input_size), the layout expected by an RNN
# constructed with batch_first=True.
inputs = torch.Tensor(x_one_hot).view(batch_size, seq_len, input_size)
labels = torch.LongTensor(y_data)


class Model(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers=1):
        super(Model, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.rnn = nn.RNN(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, batch_first=True)

    def forward(self, input):
        out, _ = self.rnn(input)
        return out.view(-1, self.hidden_size)


net = Model(input_size, hidden_size, num_layers)

# CrossEntropyLoss expects (N, num_classes) logits and (N,) class indices;
# the model already flattens its output to that shape.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.1)

for epoch in range(15):
    outputs = net(inputs)  # (seq_len, hidden_size) logits, one row per step
    loss = criterion(outputs, labels)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    print(f"Epoch: {epoch + 1}, Loss: {loss.item()}", end=' ')
    print('Predicted string: ', end='')
    # torch.Tensor.max(dim=...) returns a (values, indices) pair; taking the
    # argmax over the class dimension decodes each step greedily.
    _, predicted = outputs.max(dim=1)
    for idx in predicted.tolist():
        print(index2char[idx], end=' ')
    print()
