import torch
import torch.nn as nn
from torch.autograd import Variable
import python_ai.CV_2.dl.rnn.my.hihello_data as data


class RNN(nn.Module):
    """Character-level sequence classifier: stacked LSTM followed by a
    linear projection from hidden units to output classes.
    """

    def __init__(self, nb_input, nb_output, nb_neuron, layers_num):
        """
        Args:
            nb_input: size of each input feature vector (one-hot vocab size).
            nb_output: number of output classes.
            nb_neuron: LSTM hidden size.
            layers_num: number of stacked LSTM layers.
        """
        super(RNN, self).__init__()
        # Remember the hidden size so forward() can flatten explicitly.
        self.nb_neuron = nb_neuron
        # batch_first=True -> inputs/outputs are (batch, seq, feature).
        self.lstm = nn.LSTM(input_size=nb_input, hidden_size=nb_neuron, num_layers=layers_num, batch_first=True)
        self.fc = nn.Linear(in_features=nb_neuron, out_features=nb_output)

    def forward(self, inputs):
        """Run the LSTM over `inputs` of shape (batch, seq, nb_input) and
        return per-timestep logits of shape (batch * seq, nb_output).
        """
        x, _ = self.lstm(inputs, None)  # None -> zero initial (h, c) state
        # Flatten (batch, seq, hidden) -> (batch*seq, hidden) explicitly.
        # The original torch.squeeze(x) removed *every* size-1 dim: it only
        # worked for batch size 1 and broke when seq_len == 1 (the sequence
        # dim was dropped too, yielding a 1-D tensor). reshape is exact and
        # also supports batch > 1.
        x = x.reshape(-1, self.nb_neuron)
        x = self.fc(x)
        return x


if '__main__' == __name__:
    model = RNN(data.nb_input, data.nb_output, data.nb_neuron, data.layers_num)

    # Cross-entropy over per-timestep class logits.
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.1)

    # One-hot encode input indices: eye(V)[idx] turns each index into a
    # one-hot row. Variable is a no-op since PyTorch 0.4, so plain
    # tensors are used directly.
    X = torch.eye(len(data.char_set))[torch.LongTensor(data.x_data)]
    y = torch.LongTensor(data.y_data)
    # Flatten once, outside the loop, to match the flattened logits
    # returned by the model.
    target = y.reshape(-1)

    for step in range(20):
        optimizer.zero_grad()
        h = model(X)  # (seq, nb_output) logits
        loss = criterion(h, target)
        loss.backward()
        optimizer.step()

        pre = torch.argmax(h, 1)  # predicted class index per timestep
        # Compare against the flattened target. The original compared
        # against the un-flattened y: if y_data is nested (shape (1, seq)),
        # (pre == y) broadcasts to (seq, seq) and silently reports a wrong
        # accuracy.
        acc = (pre == target).float().mean()
        # .item() converts each 0-d tensor to a plain int before list
        # indexing (robust across torch versions).
        result = [data.char_set[i.item()] for i in pre]

        print('step:', step + 1, 'loss:', loss.item(), 'acc:{:.3f}'.format(acc.item()))
        print('char_str:', ''.join(result))
