# Evaluation: put both models in eval mode and encode a random sample
# of training sentences for greedy decoding below.
encoder.eval()
atten_decoder.eval()
num_of_evaluate = 10
# Sample distinct indices so no sentence is evaluated twice.
indices = np.random.choice(len(train_en_sents), num_of_evaluate, replace=False)
x_data = train_en_sents[indices]
sent = torch.from_numpy(x_data)
# Inference only: no_grad skips autograd bookkeeping for the encoder pass.
with torch.no_grad():
    en_repr = encoder(sent)

# Every decoded sequence starts from the <bos> token id, shape (num_of_evaluate, 1).
bos_id = cn_vocab['<bos>']
word = torch.from_numpy(np.full((num_of_evaluate, 1), bos_id))

# Zero-initialised decoder hidden/cell state,
# each of shape (num_of_evaluate, 1, hidden_size).
hidden = torch.zeros([num_of_evaluate, 1, hidden_size])
cell = torch.zeros([num_of_evaluate, 1, hidden_size])

decoded_sent = []
# Inference only: without no_grad the autograd graph would grow with every
# decoding step, wasting memory for gradients that are never used.
with torch.no_grad():
    # MAX_LEN + 2: presumably headroom for <bos>/<eos> — confirm against training setup.
    for i in range(MAX_LEN + 2):
        logits, (hidden, cell) = atten_decoder(word, hidden, cell, en_repr)
        # Greedy choice: most likely next token id per sentence, shape (batch,).
        word = torch.argmax(logits, dim=1)
        decoded_sent.append(word.numpy())
        # Restore the time dimension so the next decoder step sees (batch, 1).
        word = torch.unsqueeze(word, dim=-1)

# (batch, steps) array of predicted token ids, one row per sampled sentence.
results = np.stack(decoded_sent, axis=1)

# Hoisted out of the loops: `list(cn_vocab)` per token is O(vocab) each time;
# build the index -> word table once and index it in O(1).
idx_to_word = list(cn_vocab)

for i in range(num_of_evaluate):
    en_input = " ".join(filtered_pairs[indices[i]][0])
    ground_truth_translate = "".join(filtered_pairs[indices[i]][1])

    # Join predicted tokens, dropping padding and end-of-sequence markers.
    model_translate = ""
    for k in results[i]:
        w = idx_to_word[k]
        if w != '<pad>' and w != '<eos>':
            model_translate += w

    print(en_input)
    print("true: {}".format(ground_truth_translate))
    print("pred: {}".format(model_translate))
