import torch.nn as nn
import torch

# Width of the per-timestep output produced by both models' final Linear layer.
output_size = 2

class myRNN(nn.Module):
    """A stacked vanilla RNN followed by a linear readout at every time step.

    Expects inputs in ``(time_step, batch, input_size)`` layout (the
    ``nn.RNN`` default, ``batch_first=False``).
    """

    def __init__(self, input_size=9, hidden_size=64, batch_size=1,
                 num_layers=2, output_size=2):
        """
        Args:
            input_size: number of input features per time step.
            hidden_size: hidden-state width of each RNN layer.
            batch_size: unused; kept for backward compatibility with callers.
            num_layers: number of stacked RNN layers.
            output_size: width of the per-timestep output (defaults to 2,
                matching the original module-level constant).
        """
        super(myRNN, self).__init__()
        self.output_size = output_size
        self.rnn = nn.RNN(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers, )
        self.output_layer = nn.Linear(hidden_size, output_size)

    def forward(self, inputs, h_state):
        """Run the RNN and apply the output layer at every time step.

        Args:
            inputs: ``(time_step, batch, input_size)`` tensor.
            h_state: previous hidden state, ``(num_layers, batch, hidden_size)``.

        Returns:
            A tuple of:
              - ``(time_step * batch, output_size)`` tensor of per-step
                outputs, flattened batch-major (all steps of sample 0,
                then sample 1, ...), matching the original
                ``torch.stack(out, dim=1).view(...)`` ordering;
              - the new hidden state, ``(num_layers, batch, hidden_size)``.
        """
        # rnn_out: (time_step, batch, hidden_size)
        rnn_out, h_state = self.rnn(inputs, h_state)
        # nn.Linear broadcasts over all leading dimensions, so the original
        # Python loop over time steps collapses to a single call.
        out = self.output_layer(rnn_out)          # (time, batch, output)
        # Transpose to (batch, time, output) before flattening so the row
        # order matches the original stack(dim=1) behavior for batch > 1.
        return out.transpose(0, 1).reshape(-1, self.output_size), h_state

class myLSTM(nn.Module):
    """A stacked LSTM followed by a linear readout at every time step.

    Expects inputs in ``(time_step, batch, input_size)`` layout (the
    ``nn.LSTM`` default, ``batch_first=False``).
    """

    def __init__(self, input_size=9, hidden_size=64, batch_size=1,
                 num_layers=2, output_size=2):
        """
        Args:
            input_size: number of input features per time step.
            hidden_size: hidden-state width of each LSTM layer.
            batch_size: unused; kept for backward compatibility with callers.
            num_layers: number of stacked LSTM layers.
            output_size: width of the per-timestep output (defaults to 2,
                matching the original module-level constant).
        """
        super(myLSTM, self).__init__()
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.rnn = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers, )
        self.output_layer = nn.Linear(hidden_size, output_size)

    def forward(self, inputs, hc):
        """Run the LSTM and apply the output layer at every time step.

        Args:
            inputs: ``(time_step, batch, input_size)`` tensor.
            hc: tuple ``(h, c)`` of the previous hidden and cell states,
                each ``(num_layers, batch, hidden_size)``.

        Returns:
            A tuple of:
              - ``(time_step * batch, output_size)`` tensor of per-step
                outputs, flattened batch-major (all steps of sample 0,
                then sample 1, ...), matching the original
                ``torch.stack(out, dim=1).view(...)`` ordering;
              - the new ``(h, c)`` state tuple.
        """
        # rnn_out: (time_step, batch, hidden_size)
        rnn_out, hc = self.rnn(inputs, hc)
        # nn.Linear broadcasts over all leading dimensions, so the original
        # Python loop over time steps collapses to a single call.
        out = self.output_layer(rnn_out)          # (time, batch, output)
        # Transpose to (batch, time, output) before flattening so the row
        # order matches the original stack(dim=1) behavior for batch > 1.
        return out.transpose(0, 1).reshape(-1, self.output_size), hc
