import torch
import torch.nn as nn
import torch.nn.functional as F


# ---- RNN hyperparameters ----
input_size = 100   # dimensionality of each input vector
hidden_size = 20   # dimensionality of the hidden state
num_layers = 4     # number of stacked RNN layers

# batch_first=False: inputs/outputs use the (seq_len, batch, feature) layout.
rnn = nn.RNN(
    input_size=input_size,
    hidden_size=hidden_size,
    num_layers=num_layers,
    batch_first=False,
)
print("rnn:", rnn)

seq_len = 10     # number of time steps per sequence
batch_size = 5   # sequences per batch (an earlier run used batch_size = 1)

# Random input batch and an all-zero initial hidden state.
x = torch.randn(seq_len, batch_size, input_size)
h0 = torch.zeros(num_layers, batch_size, hidden_size)

# out: hidden state of the LAST layer at every time step.
# h:   final hidden state of EVERY layer at the last time step.
out, h = rnn(x, h0)

print("out.shape:", out.shape)  # (seq_len, batch_size, hidden_size)
print("h.shape:", h.shape)      # (num_layers, batch_size, hidden_size)

'''
Observed output when batch_size = 1:
out.shape: torch.Size([10, 1, 20])
h.shape: torch.Size([4, 1, 20])
'''

'''
Observed output when batch_size = 5:
out.shape: torch.Size([10, 5, 20])  # (seq_len, batch_size, hidden_size)
h.shape: torch.Size([4, 5, 20])     # (num_layers, batch_size, hidden_size)
'''


# Project each hidden vector down to a single scalar.
fc = nn.Linear(hidden_size, 1)
fc.bias.data.zero_()  # start from a zero bias

# Flatten (seq_len, batch, hidden) into (seq_len * batch, hidden)
# so every time step / batch element goes through the same Linear.
out = out.view(-1, hidden_size)
y = fc(out)
print(y.shape)  # (seq_len * batch_size, 1)

# Prepend a singleton dimension: (1, seq_len * batch_size, 1).
y1 = y.unsqueeze(dim=0)
print(y1.shape)