input_size = 4
hidden_size = 4
batch_size = 1

# Vocabulary: index -> character.
idx2char = ['e', 'h', 'l', 'o']
x_data = [1, 0, 2, 2, 3]  # input sequence "hello" encoded as indices
y_data = [3, 1, 2, 3, 2]  # target sequence "ohlol" encoded as indices

# The rows of a 4x4 identity matrix serve as one-hot vectors,
# one row per character index.
one_hot_lookup = [[1 if col == row else 0 for col in range(4)]
                  for row in range(4)]

# One-hot encode the input sequence: (seqLen, inputSize).
x_one_hot = [one_hot_lookup[index] for index in x_data]

# Reshape to the (seqLen, batchSize, inputSize) layout that nn.RNN
# expects when batch_first=False (the default).
inputs = torch.Tensor(x_one_hot).view(-1, batch_size, input_size)  # (5, 1, 4)
# Targets as (seqLen, batchSize) class indices for CrossEntropyLoss.
labels = torch.LongTensor(y_data).view(-1, 1)  # (5, 1)
class Model(nn.Module):
    """Character-level RNN that maps a one-hot input sequence to
    per-time-step class scores over the 4-character vocabulary."""

    def __init__(self):
        super(Model, self).__init__()
        self.batch_size = 1   # kept for backward compatibility; forward()
                              # now infers the batch size from its input
        self.input_size = 4
        self.hidden_size = 4
        self.num_layers = 1
        # batch_first defaults to False, so the RNN consumes inputs shaped
        # (seq_len, batch_size, input_size); with batch_first=True it would
        # expect (batch_size, seq_len, input_size) instead.
        self.rnn = nn.RNN(input_size=self.input_size,
                          hidden_size=self.hidden_size,
                          num_layers=self.num_layers)

    def forward(self, input):
        """Run the RNN over `input` of shape (seq_len, batch, input_size)
        and return the per-step outputs of shape (seq_len, batch, hidden_size)."""
        # The RNN needs an initial hidden state for the first time step;
        # with no prior knowledge, all zeros is the standard choice.
        # Shape: (num_layers, batch_size, hidden_size) — one dimension more
        # than RNNCell's (batch_size, hidden_size).
        # Fix: derive the batch size from the input rather than the
        # hard-coded self.batch_size, so any batch size works.
        batch_size = input.size(1)
        h0 = torch.zeros(self.num_layers, batch_size, self.hidden_size)
        # out:    (seq_len, batch, hidden_size) — output at every step
        # hidden: (num_layers, batch, hidden_size) — final state, unused here
        out, hidden = self.rnn(input, h0)
        return out

net = Model()
# CrossEntropyLoss fuses LogSoftmax and NLLLoss into a single module.
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.1)

for epoch in range(15):
    # inputs: (5, 1, 4) -> out: (5, 1, 4) == (seq_len, batch, hidden_size)
    out = net(inputs)
    # CrossEntropyLoss on a sequence expects (N, C, d1): move the class
    # (hidden) dimension to axis 1 -> (seq_len, hidden_size, batch),
    # matching labels of shape (seq_len, batch).
    y_pred = out.permute(0, 2, 1)
    # Single loss for the whole sequence — the old `loss = 0; loss += ...`
    # accumulator was a leftover from a per-step RNNCell version.
    loss = criterion(y_pred, labels)

    # argmax over the class dimension -> predicted indices, shape (5, 1).
    _, idx = y_pred.max(dim=1)
    # Decode and print the predicted characters, space-separated; works for
    # any sequence length instead of hard-coding five subscripts.
    print(*[idx2char[i.item()] for i in idx.view(-1)], end='')

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    print(',Epoch [%d/15] loss=%.4f' % (epoch + 1, loss.item()))
