import torch
import numpy as n
import torch.nn as nn
from torch import FloatTensor

# Number of optimizer steps taken over the single toy batch in main().
EPOCH = 13


class RecurrentNeuralNetwork(nn.Module):
    """Encoder/decoder LSTM over batch-first scalar sequences.

    Input: float tensor of shape (batch, seq_len, 1).
    Output: float tensor of shape (batch, seq_len, 1), squashed to (0, 1)
    by the final sigmoid.
    """

    def __init__(self):
        super(RecurrentNeuralNetwork, self).__init__()
        # nn.LSTM returns a (output, (h_n, c_n)) tuple, so it cannot live
        # inside nn.Sequential (the tuple would be fed to the next layer).
        # Keep the recurrent layers separate from the feed-forward heads.
        self.encoder_rnn = nn.LSTM(
            input_size=1,
            hidden_size=32,
            num_layers=5,
            batch_first=True
        )
        self.encoder_head = nn.Sequential(
            nn.Linear(32, 28),
            nn.LeakyReLU(),
            # Fixed: was nn.Linear(38, 10), which mismatched the 28-dim
            # output of the previous linear layer.
            nn.Linear(28, 10),
            nn.Sigmoid()
        )
        self.decoder_rnn = nn.LSTM(
            input_size=10,
            hidden_size=32,
            num_layers=5,
            batch_first=True
        )
        self.decoder_head = nn.Sequential(
            nn.Linear(32, 10),
            nn.LeakyReLU(),
            nn.Linear(10, 2),
            nn.Sigmoid(),
            nn.Linear(2, 1),
            nn.Sigmoid()
        )

    def forward(self, _x, _s=None):
        """Run the encoder then the decoder.

        _x: (batch, seq_len, 1) float tensor.
        _s: optional initial (h_0, c_0) state for the encoder LSTM.
        Returns a (batch, seq_len, 1) tensor in (0, 1).
        """
        enc_out, _ = self.encoder_rnn(_x, _s)
        enc = self.encoder_head(enc_out)
        dec_out, _ = self.decoder_rnn(enc)
        return self.decoder_head(dec_out)


def main():
    """Train the toy RNN on a single hard-coded sequence pair.

    Fixes over the original: the target `y` is now a tensor (a plain
    Python list cannot be passed to a loss function), and the loss is
    MSE — CrossEntropyLoss expects integer class indices and rejects
    continuous float targets of shape (batch, seq, 1).
    """
    rnn = RecurrentNeuralNetwork()
    # One batch, sequence length 3, one feature per time step.
    # FloatTensor accepts the nested list directly; no numpy round-trip.
    x = FloatTensor([[[1], [2], [3]]])
    # NOTE(review): targets 4..6 lie outside the model's sigmoid range
    # (0, 1) — the loss can never reach zero; confirm intended targets.
    y = FloatTensor([[[4], [5], [6]]])
    loss_function = nn.MSELoss()
    optimizer = torch.optim.Adam(rnn.parameters(), lr=1e-3)
    for _ in range(EPOCH):
        prediction = rnn(x)
        loss = loss_function(prediction, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()


# Run training only when executed as a script, not on import.
if __name__ == "__main__":
    main()
