import torch as pt
from python_ai.common.xcommon import *
import numpy as np
from torch.nn.functional import one_hot


class MyLstmNet(pt.nn.Module):
    """Char-level LSTM: maps (m, n_step, n_input) one-hot sequences to
    per-timestep class scores of shape (m*n_step, n_input).

    BUG FIX: the output layer is now LogSoftmax, not Softmax. The training
    script feeds this model's output to ``pt.nn.NLLLoss``, which expects
    *log*-probabilities; with plain Softmax the loss evaluates -p instead of
    -log(p), i.e. it optimizes the wrong objective. ``argmax`` over the output
    is unchanged, since log is monotonic.
    """

    def __init__(self, n_input, n_neuron, n_layers, **kwargs):
        super().__init__(**kwargs)
        self.n_neuron = n_neuron
        # batch_first=True -> input/output tensors are (batch, seq, feature)
        self.lstm = pt.nn.LSTM(n_input, n_neuron, n_layers, batch_first=True)
        self.fc = pt.nn.Linear(n_neuron, n_input)
        # dim=1 because forward() flattens timesteps into rows of
        # shape (m*n_step, n_input) before this layer is applied.
        self.log_softmax = pt.nn.LogSoftmax(dim=1)

    def forward(self, x):  # m x n_step x n_input
        x, _ = self.lstm(x)  # m x n_step x n_neuron (hidden/cell states unused)
        # Flatten batch and time so one Linear scores every timestep at once.
        x = x.reshape(-1, self.n_neuron)  # m*n_step x n_neuron
        x = self.fc(x)  # m*n_step x n_input
        return self.log_softmax(x)  # m*n_step x n_input, log-probabilities


def accuracy(y_true, y_pred):  # (m*n_step,) labels, m*n_step x n_input scores
    """Fraction of rows whose argmax class equals the integer label."""
    predicted = pt.argmax(y_pred, dim=1)
    hits = (y_true.long() == predicted).float()
    return hits.mean()


if __name__ == '__main__':
    # Train the char-level LSTM on the "hihello" toy dataset and plot the loss.
    import python_ai.CV_2.dl.rnn.my.hihello_data as data
    import matplotlib.pyplot as plt

    ALPHA = 0.01  # learning rate
    ITERS = 200   # training iterations

    device = pt.device('cuda:0' if pt.cuda.is_available() else 'cpu')

    model = MyLstmNet(data.dict_len, data.nb_neuron, 2).to(device)
    # NOTE(review): NLLLoss expects log-probabilities from the model; the
    # model's output layer must be LogSoftmax for this loss to be correct.
    criterion = pt.nn.NLLLoss()
    optim = pt.optim.Adam(model.parameters(), lr=ALPHA)

    print('x_data_oh', np.shape(data.x_data_oh))
    print('y_data', np.shape(data.y_data))
    x_data = pt.Tensor(data.x_data_oh).to(device)
    # NLLLoss targets must be class indices of shape (N,), not (N, 1) —
    # a trailing singleton dim raises "multi-target not supported".
    y_data = pt.Tensor(data.y_data).long().reshape(-1).to(device)

    GROUP = int(np.ceil(ITERS / 20))  # print roughly 20 progress lines
    cost_hist = []
    for step in range(ITERS):
        model.train(True)
        optim.zero_grad()
        h = model(x_data)  # m*n_step x n_input
        loss = criterion(h, y_data)  # scores (m*n_step, n_input) vs (m*n_step,)
        loss.backward()
        optim.step()
        model.train(False)
        cost = loss.detach().item()
        cost_hist.append(cost)
        acc = accuracy(y_data, h).detach().item()
        if step % GROUP == 0 or step == ITERS - 1:
            h_str = data.h_to_str(h.detach().cpu().numpy())
            print(f'#{step + 1}: cost = {cost}, acc = {acc}, str = {h_str}')

    plt.title('loss in iterations')
    plt.plot(cost_hist)
    # BUG FIX: plt.show() was dedented to module level, so it executed even
    # when this file was merely imported; it belongs inside the main guard.
    plt.show()
