# Training loop for the attention-based seq2seq model: English source sentences
# are encoded once per batch, then the decoder LSTM is unrolled one step at a
# time with teacher forcing, accumulating a cross-entropy loss per step.
encoder = Encoder()
atten_decoder = AttentionDecoder()
criterion = torch.nn.CrossEntropyLoss(reduction='mean')
# A single optimizer updates both encoder and decoder parameters jointly.
optimizer = torch.optim.Adam(itertools.chain(encoder.parameters(),
                                             atten_decoder.parameters()),
                             lr=0.001)
for epoch in range(epochs):
    print("epoch:{}".format(epoch))
    # Shuffle the training data; the same permutation is applied to source
    # sentences, target inputs, and target labels to keep them aligned.
    perm = np.random.permutation(len(train_en_sents))
    train_en_sents_shuffled = train_en_sents[perm]
    train_cn_sents_shuffled = train_cn_sents[perm]
    train_cn_label_sents_shuffled = train_cn_label_sents[perm]

    train_en_shape = train_en_sents_shuffled.shape[0]
    for iteration in range(train_en_shape // batch_size):
        # BUG FIX: the original line (`batch_len = (a): (b)`) was invalid
        # Python syntax; a slice object expresses the intended batch window.
        batch_len = slice(batch_size * iteration, batch_size * (iteration + 1))
        x_data = train_en_sents_shuffled[batch_len]
        sent = torch.from_numpy(x_data)  # sent: (batch, src_len), e.g. (16, 11)
        encoder_outputs = encoder(sent)
        # encoder_outputs: (batch, src_len, hidden), e.g. (16, 11, 256)
        x_cn_data = train_cn_sents_shuffled[batch_len]
        x_cn_label_data = train_cn_label_sents_shuffled[batch_len].astype('int64')
        # Initial decoder state, zeroed at the start of every batch.
        # Shape: (batch, num_layers(=1) * num_directions(=1), hidden_size),
        # e.g. (16, 1, 256).
        previous_hidden = torch.zeros([batch_size, 1, hidden_size])
        previous_cell = torch.zeros([batch_size, 1, hidden_size])
        # Single-step LSTM decoding: unlike the encoder, the decoder LSTM is
        # advanced only one time step per call; the full unroll over the
        # target sequence happens inside this training loop.
        loss = 0
        for i in range(MAX_LEN + 2):
            cn_word = torch.from_numpy(x_cn_data[:, i: i + 1])
            cn_word_label = torch.from_numpy(x_cn_label_data[:, i])
            logits, (previous_hidden, previous_cell) = atten_decoder(cn_word,
                                                                     previous_hidden,
                                                                     previous_cell,
                                                                     encoder_outputs)
            # BUG FIX: accumulate the loss at EVERY decoding step. Originally
            # these two lines sat outside the loop, so only the final step's
            # logits contributed to the loss.
            step_loss = criterion(logits, cn_word_label)
            loss += step_loss
        # Average over the number of decoded time steps.
        loss = loss / (MAX_LEN + 2)
        if iteration % 20 == 0:
            print("iter {}, loss:{}".format(iteration, loss.detach()))
        # BUG FIX: the backward pass and optimizer step must run on every
        # iteration. Originally they were nested inside the logging `if`
        # above, so parameters were only updated every 20th batch.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()