import torch
import torch.nn as nn

class RNN(nn.Module):
    """Single-layer RNN classifier: RNN -> Linear -> LogSoftmax.

    Operates on un-batched inputs; `forward` temporarily adds a batch
    dimension of size 1 for the underlying batch-first ``nn.RNN``.
    """

    def __init__(self, input_size, hidden_size, output_size):
        """
        :param input_size: size of the last dimension of the input tensor
        :param hidden_size: size of the last dimension of the hidden state
        :param output_size: size of the last dimension of the output tensor
        """
        super(RNN, self).__init__()

        self.hidden_size = hidden_size

        self.rnn = nn.RNN(input_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, input1, hidden1):
        # Prepend a batch dimension of 1, as nn.RNN(batch_first=True)
        # expects [batch, seq, feature] and [layers, batch, hidden].
        batched_input = input1.unsqueeze(0)
        batched_hidden = hidden1.unsqueeze(0)

        rnn_out, next_hidden = self.rnn(batched_input, batched_hidden)

        # Drop the batch dimension again before projecting to class scores.
        logits = self.fc(rnn_out.squeeze(0))
        return self.softmax(logits), next_hidden.squeeze(0)

    def initHidden(self):
        # All-zero initial hidden state of shape [1, hidden_size].
        return torch.zeros(1, self.hidden_size)

if __name__ == '__main__':
    # Smoke test: push one random sample through the model and print
    # the log-probabilities and the updated hidden state.
    input_size = 768
    hidden_size = 128
    n_categories = 2

    # Named `inputs` (not `input`) to avoid shadowing the builtin.
    inputs = torch.rand(1, input_size)
    hidden = torch.rand(1, hidden_size)

    rnn = RNN(input_size, hidden_size, n_categories)
    outputs, hidden = rnn(inputs, hidden)
    print("outputs:", outputs)
    print("hidden:", hidden)
