import torch as pt
import numpy as np


class MyLstmRnnNet(pt.nn.Module):
    """Multi-layer LSTM followed by a linear projection to class logits.

    Consumes batches shaped (batch, steps, n_input) and returns per-timestep
    logits flattened to (batch * steps, n_output), ready for a sparse
    cross-entropy loss against flattened integer targets.
    """

    def __init__(self, n_input, n_output, n_hidden, n_layers, n_steps, **kwargs):
        super().__init__(**kwargs)
        self.n_output = n_output
        self.n_hidden = n_hidden
        self.n_steps = n_steps
        # batch_first=True -> the LSTM expects (batch, steps, features).
        self.lstm = pt.nn.LSTM(n_input, n_hidden, n_layers, batch_first=True)
        self.fc = pt.nn.Linear(n_hidden, n_output)

    def forward(self, inputs):
        """Map (batch, steps, n_input) inputs to (batch * steps, n_output) logits."""
        seq_out, _ = self.lstm(inputs)                 # (batch, steps, n_hidden); final states unused
        flat = seq_out.reshape((-1, self.n_hidden))    # merge batch and time axes
        return self.fc(flat)                           # logits per timestep


def accuracy(y_true, y_pred):
    """Fraction of rows in `y_pred` (logits) whose argmax equals `y_true`.

    `y_true` holds integer class indices; `y_pred` is (N, n_classes) scores.
    Returns a scalar float tensor in [0, 1].
    """
    predicted = y_pred.argmax(dim=1)
    return (predicted == y_true).float().mean()

if '__main__' == __name__:

    # Fixed seeds so weight init and training are reproducible across runs.
    pt.manual_seed(1)
    np.random.seed(1)

    import python_ai.CV_2.dl.rnn.my.hihello_data as data

    # Number of timesteps per sequence, taken from the data's second axis.
    n_steps = np.shape(data.x_data)[1]
    model = MyLstmRnnNet(data.nb_input, data.nb_output, data.nb_neuron,
                         data.layers_num, n_steps)

    criterion = pt.nn.CrossEntropyLoss()
    optim = pt.optim.Adam(model.parameters(), lr=0.1)

    # FIX: pt.autograd.Variable has been a deprecated no-op since PyTorch 0.4;
    # plain tensors participate in autograd directly, so the wrappers are dropped.
    # One-hot encode the integer indices via an identity-matrix row lookup:
    # (batch, steps) ints -> (batch, steps, dict_len) floats.
    X = pt.eye(data.dict_len)[pt.LongTensor(data.x_data)]
    # Flatten targets to (batch * steps,) to match the model's flattened logits.
    y = pt.LongTensor(data.y_data).view(-1)
    print('X', X.size())
    print('y', y.size())

    ITERS = 25
    for step in range(ITERS):
        optim.zero_grad()
        h = model(X)               # (batch * steps, nb_output) logits
        loss = criterion(h, y)
        loss.backward()
        optim.step()

        # Metrics computed on the logits from before this step's weight update.
        acc = accuracy(y, h)
        # Decode predicted indices back to characters, one string per sequence.
        result = [''.join([data.idx2char[i] for i in row])
                  for row in h.argmax(dim=1).detach().numpy().reshape((-1, n_steps))]

        print(f'#{step + 1}, loss = {loss.item()}, acc = {acc.item()}, str = {result}')

