import torch
import numpy as np
from torch import nn
from torch.autograd import *
from torch import FloatTensor

# Intended sequence length (time steps per sample).
# NOTE(review): the training data in main() hard-codes 3-step sequences
# directly; this constant is currently unused by the code below.
TIME_CYCLE = 3
# NOTE(review): also currently unused — main() trains on a fixed batch of 2.
BATCH_SIZE = 1


class RnnClassification(nn.Module):
    """3-layer vanilla RNN that classifies a univariate sequence into 2 classes.

    Input:  float tensor of shape (batch, time, 1) — batch_first=True.
    Output: raw class logits of shape (batch, 2).

    Note: this module returns *logits*, not probabilities.  The training
    loss, nn.CrossEntropyLoss, applies log-softmax internally; applying
    nn.Softmax here as well (as the original code did) double-squashes the
    scores and severely weakens the gradients.  Apply softmax outside the
    model only if calibrated probabilities are needed for display.
    """

    def __init__(self):
        super(RnnClassification, self).__init__()
        self.recurrent_layers = nn.RNN(
            input_size=1,
            hidden_size=32,
            num_layers=3,
            batch_first=True,
        )
        # Maps the 32-dim hidden state of the last time step to 2 class logits.
        self.linear = nn.Linear(32, 2)

    def forward(self, x):
        # x: (batch, time, 1); None -> initial hidden state defaults to zeros.
        rnn_out, _ = self.recurrent_layers(x, None)
        # Classify from the output of the final time step only.
        last_rnn_out = rnn_out[:, -1, :]
        return self.linear(last_rnn_out)


def main():
    """Train the RNN classifier on a tiny 2-sample toy dataset.

    Prints the logits and predicted labels every 20 iterations and
    returns the final predicted labels as a list of ints (backward
    compatible: previous version returned None, which no caller used).
    """
    # Use the GPU when available but fall back to CPU, so the script also
    # runs on machines without CUDA (the original hard-coded .cuda()).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    rnn = RnnClassification().to(device)
    optimizer = torch.optim.Adam(rnn.parameters(), 1e-2)
    # CrossEntropyLoss expects raw logits and integer class labels.
    loss_func = nn.CrossEntropyLoss()

    train_data = [
        [
            [0.1],
            [0.2],
            [0.3]
        ],
        [
            [0.4],
            [0.5],
            [0.6]
        ]
    ]

    label_dat = [
        1,
        0
    ]

    # The data never changes, so build the tensors once, outside the loop
    # (the original re-wrapped them in deprecated Variables every iteration).
    inputs = torch.tensor(train_data, dtype=torch.float32, device=device)
    labels = torch.tensor(label_dat, dtype=torch.long, device=device)

    predictions = []
    for i in range(100):
        logits = rnn(inputs)
        loss = loss_func(logits, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Per-sample prediction is the argmax over dim=1 (the class
        # dimension).  The original used np.argmax(..., 0), which takes
        # the argmax across the *batch* dimension and mixes samples.
        predictions = logits.argmax(dim=1).tolist()
        if i % 20 == 0:
            print(logits.detach().cpu())
            print(str(predictions))
    return predictions


if __name__ == "__main__":
    main()
