# Hyperparameters
input_size = 4
hidden_size = 4
batch_size = 1

# Vocabulary: index -> character
idx2char = ['e', 'h', 'l', 'o']
x_data = [1, 0, 2, 2, 3]  # input sequence:  "hello"
y_data = [3, 1, 2, 3, 2]  # target sequence: "ohlol"

# One-hot lookup table: row i is the one-hot vector for character index i
# (an identity matrix built as nested lists).
one_hot_lookup = [[int(row == col) for col in range(input_size)]
                  for row in range(input_size)]

# Encode the input sequence as one-hot vectors: shape (seq_len, input_size)
x_one_hot = [one_hot_lookup[x] for x in x_data]

# Reshape inputs to (seq_len, batch_size, input_size); view(-1, ...) keeps
# the original seq_len while inserting the batch dimension.
inputs = torch.Tensor(x_one_hot).view(-1, batch_size, input_size)
# Targets as (seq_len, 1) so labels[t] is the (batch,) target for step t.
labels = torch.LongTensor(y_data).view(-1, 1)
class Model(nn.Module):
    """Thin wrapper around a single ``nn.RNNCell``.

    One call to ``forward`` advances the hidden state by exactly one
    time step; the caller is responsible for iterating over the sequence
    and threading the hidden state through successive calls.
    """

    def __init__(self, input_size, hidden_size, batch_size):
        """Store the dimensions and build the RNN cell.

        input_size: size of each one-hot input vector.
        hidden_size: size of the hidden state (also the output size).
        batch_size: kept for reference; not used inside the cell itself.
        """
        super(Model, self).__init__()
        self.batch_size = batch_size
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.rnncell = nn.RNNCell(self.input_size, self.hidden_size)

    def forward(self, input, hidden):
        """Return the next hidden state for one time step.

        input: (batch_size, input_size); hidden: (batch_size, hidden_size).
        """
        return self.rnncell(input, hidden)
# --- Training setup --------------------------------------------------------
net = Model(input_size, hidden_size, batch_size)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.1)

# h0: zero vector used as the initial hidden state of every epoch.
hidden0 = torch.zeros(batch_size, hidden_size)

for epoch in range(15):
    # Start each epoch from the (constant, zero) initial hidden state.
    hidden = hidden0
    # Accumulate the per-step losses into one scalar so a single
    # backward() propagates through the whole unrolled sequence
    # (the gradient of the sum equals the sum of the gradients).
    tol_loss = 0
    optimizer.zero_grad()  # clear gradients from the previous epoch

    # Unroll the RNN over the sequence instead of copy-pasting the
    # per-step code five times: each iteration feeds one time step
    # (input: (batch, input_size), label: (batch,)) and threads the
    # hidden state forward.
    for input_t, label_t in zip(inputs, labels):
        hidden = net(input_t, hidden)
        tol_loss = tol_loss + criterion(hidden, label_t)
        # Greedy decode: print the character with the highest activation.
        _, idx = hidden.max(dim=1)
        print(idx2char[idx.item()], end='')

    tol_loss.backward()  # backprop through the whole sequence at once
    optimizer.step()     # update net.parameters()
    # BUG FIX: the original printed loss.item() where loss was the plain
    # int 0 (never reassigned), which raises AttributeError; report the
    # accumulated sequence loss instead.
    print(',Epoch [%d/15] loss=%.4f' % (epoch + 1, tol_loss.item()))
