import torch
from torch import nn

# ---- Demo configuration -------------------------------------------------
batch_size = 10      # sequences per batch
seq_len = 20         # time steps per sequence
embedding_dim = 30   # size of each token embedding vector
word_vocab = 100     # vocabulary size (valid token ids: 0 .. word_vocab-1)

# Random fake token ids. Use `high=word_vocab` (not a hard-coded 100) so the
# sampled ids always stay inside the embedding table if the vocab changes.
# Renamed from `input`, which shadowed the builtin of the same name.
token_ids = torch.randint(low=0, high=word_vocab, size=(batch_size, seq_len))

embedding = nn.Embedding(word_vocab, embedding_dim)
LSTM = nn.LSTM(embedding_dim, hidden_size=18, num_layers=2, batch_first=True)
GRU = nn.GRU(embedding_dim, hidden_size=18, num_layers=2, batch_first=True)
biLSTM = nn.LSTM(embedding_dim, hidden_size=18, num_layers=2, batch_first=True, bidirectional=True)

# (batch, seq_len) -> (batch, seq_len, embedding_dim)
x = embedding(token_ids)
# ---- Single-direction, 2-layer LSTM demo --------------------------------
print('-' * 15 + 'LSTM' + '-' * 15)
lstm_out, (lstm_h, lstm_c) = LSTM(x)
print(lstm_out.shape)  # (batch, seq_len, hidden_size)
print(lstm_h.shape)    # (num_layers, batch, hidden_size)
print(lstm_c.shape)    # (num_layers, batch, hidden_size)
step_final = lstm_out[:, -1, :]  # output at the last time step
top_final = lstm_h[-1, :, :]     # final hidden state of the top layer
print(step_final.size(), top_final.size())
# Elementwise True: for a uni-directional LSTM these are the same values.
print(step_final == top_final)

# ---- Single-direction, 2-layer GRU demo ---------------------------------
print('-' * 15 + 'GRU' + '-' * 15)
gru_out, gru_h = GRU(x)
print(gru_out.shape)  # (batch, seq_len, hidden_size)
print(gru_h.shape)    # (num_layers, batch, hidden_size)
step_final = gru_out[:, -1, :]  # output at the last time step
top_final = gru_h[-1, :, :]     # final hidden state of the top layer
print(step_final.size(), top_final.size())
# Elementwise True: for a uni-directional GRU these are the same values.
print(step_final == top_final)


# ---- Bidirectional 2-layer LSTM demo ------------------------------------
print('---' * 5 + 'biLSTM' + '---' * 5)
out_biLSTM, (h_2, c_2) = biLSTM(x)

print(out_biLSTM.shape)  # (batch, seq_len, 2 * hidden_size)
print(h_2.shape)         # (num_layers * 2, batch, hidden_size)
print(c_2.shape)         # (num_layers * 2, batch, hidden_size)

# Derive the per-direction hidden width from the state tensor instead of
# hard-coding 18, so the slicing stays correct if the model config changes.
hidden_size = h_2.size(-1)

### Forward direction: the first half of the features at the LAST time step
### equals the top layer's forward final state, h_2[-2].
last_pre = out_biLSTM[:, -1, :hidden_size]
last_hidden_pre = h_2[-2, :, :]
print(last_pre.size(), last_hidden_pre.size())
print(last_pre == last_hidden_pre)
print('------' * 5)
### Backward direction: the second half of the features at the FIRST time
### step equals the top layer's backward final state, h_2[-1].
last_out = out_biLSTM[:, 0, hidden_size:]
last_hidden = h_2[-1, :, :]
print(last_out.shape, last_hidden.shape)
print(last_out == last_hidden)