import torch
import torch.nn as nn
from torch.autograd import Variable as V


# Conv1d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True)
# output_size = (input_size - kernel_size + 2*padding) // stride + 1
conv1d = nn.Conv1d(16, 33, 3, stride=2, padding=1)
# (batch, channels, length); plain tensors — the Variable wrapper is deprecated
# (Variable was merged into Tensor in PyTorch 0.4), and `input` shadowed the builtin.
conv1d_in = torch.randn(100, 16, 50)
conv1d_out = conv1d(conv1d_in)  # -> (100, 33, (50 - 3 + 2*1)//2 + 1) = (100, 33, 25)
print(conv1d_out.size())

# ConvTranspose2d(in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1, bias=True)
# output_size = (input_size - 1)*stride - 2*padding + kernel_size + output_padding
# NOTE: the original code built nn.ConvTranspose2d(16, 33, 3, stride=2) and immediately
# overwrote it with the line below — that dead assignment is removed here.
deconv = nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
# (batch, channels, H, W); plain tensor replaces the deprecated Variable wrapper
deconv_in = torch.randn(100, 16, 64, 64)
# H: (64-1)*2 - 2*4 + 3 = 121 ; W: (64-1)*1 - 2*2 + 5 = 64
deconv_out = deconv(deconv_in)
print(deconv_out.size())

#MaxPool2d(kernel_size, stride=None, padding=0, dilation=1, return_indices=False, ceil_mode=False)

#MaxUnpool2d(kernel_size, stride=None, padding=0)
#The inverse of MaxPool2d — only approximately, since the non-maximal values are discarded during max pooling and cannot be recovered.

#AvgPool2d(kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True) 

# RNN, LSTM, GRU, RNNCell, LSTMCell, GRUCell
lstm = nn.LSTM(64, 128, 10, bidirectional=False)  # (input_size, hidden_size, num_layers)
# Plain tensors replace the deprecated Variable wrapper, and the sequence is no
# longer bound to the name `input` (shadowed the builtin).
seq = torch.randn(80, 100, 64)   # (seq_len, batch_size, input_size)
h0 = torch.randn(10, 100, 128)   # (num_layers * num_directions, batch_size, hidden_size)
c0 = torch.randn(10, 100, 128)   # same shape as h0
# out: (seq_len, batch, hidden_size * num_directions)
#   per-time-step outputs of the LAST layer -> (80, 100, 128)
# hn: (num_layers * num_directions, batch, hidden_size)
#   hidden state of EVERY layer at the final time step -> (10, 100, 128)
# cn: same shape as hn (the cell states)
out, (hn, cn) = lstm(seq, (h0, c0))
print(out.size(), hn.size(), cn.size())