import torch
import torch.nn as nn
import torch.nn.functional as F


# RNN demo: run a batch of random sequences through a single-layer RNN,
# then project each sequence's final hidden output to a scalar with nn.Linear.

input_size = 100   # dimensionality of each input vector (per time step)
hidden_size = 20   # hidden-state dimensionality
num_layers = 1     # number of stacked RNN layers

# batch_first=True -> input/output tensors are laid out (batch, seq, feature)
rnn = nn.RNN(input_size=input_size, hidden_size=hidden_size,
             num_layers=num_layers, batch_first=True)
for param in rnn.parameters():
    print(param.shape)

print("rnn:", rnn)

seq_len = 10       # sequence length
batch_size = 5     # number of sequences per batch

x = torch.randn(batch_size, seq_len, input_size)       # input batch: (5, 10, 100)
h0 = torch.zeros(num_layers, batch_size, hidden_size)  # initial hidden state: (1, 5, 20)

out, h = rnn(x, h0)  # forward pass

print("out.shape:", out.shape)  # (batch_size, seq_len, hidden_size)
print("h.shape:", h.shape)      # (num_layers, batch_size, hidden_size)

'''
out.shape: torch.Size([5, 10, 20])
h.shape: torch.Size([1, 5, 20])
'''
# For a single-layer RNN, the output at the last time step equals h[-1].
print(out[:, -1, :])
print(h)

# Map the final hidden output of each sequence down to one scalar.
fc = nn.Linear(hidden_size, 1)  # (hidden_size, 1)
fc.bias.data.zero_()            # zero the bias so y is purely weight @ hidden

last_step = out[:, -1, :]  # (batch_size, hidden_size) — final time step only
y = fc(last_step)
print(y.shape)  # (batch_size, 1) -> torch.Size([5, 1])


# y1 = y.unsqueeze(dim=0)
# print(y1.shape)