import torch
from torch import nn
from torch.nn import LSTM


class Encoder(nn.Module):
    """Bidirectional single-layer LSTM encoder.

    Consumes a batch-first tensor of shape (batch, seq_len, input_size)
    and produces the per-step LSTM outputs (feature width 2 * embed_dim,
    since both directions are concatenated) together with the final
    (h_n, c_n) state pair.
    """

    def __init__(self, input_size, embed_dim):
        super().__init__()
        # batch_first=True -> tensors are laid out (batch, seq, feature);
        # bidirectional=True doubles the output feature dimension.
        self.lstm = LSTM(input_size, embed_dim, batch_first=True, bidirectional=True)

    def forward(self, inputs, hidden=None):
        # hidden=None makes the LSTM start from zero-initialized states.
        encoded, state = self.lstm(inputs, hidden)
        return encoded, state


# Demo: run a 2-step sequence of scalars through the bidirectional encoder.
encoder = Encoder(1, 3)

# Reshape the two-value sequence to (batch=1, seq_len=2, input_size=1),
# matching the LSTM's batch_first layout.
data = torch.tensor([[1., 2.]])
data = data.view(1, 2, 1)

# Full-sequence pass: output_c is (1, 2, 6) per-step features
# (2 * embed_dim from the two directions); hidden_c is the (h_n, c_n) pair.
output_c, hidden_c = encoder(data)
print(output_c)
print(hidden_c)