# Model / training hyperparameters (read as module-level globals by the
# model classes below).
embedding_size = 128  # dimensionality of token embedding vectors
hidden_size = 256  # LSTM hidden-state size
num_encoder_lstm_layers = 1  # number of stacked LSTM layers in the encoder
# NOTE(review): len(list(v)) forces a full iteration/copy; if the vocab
# objects are sized containers, len(v) alone would suffice — confirm type.
en_vocab_size = len(list(en_vocab))  # source (English) vocabulary size
cn_vocab_size = len(list(cn_vocab))  # target (Chinese) vocabulary size
epochs = 20  # training epochs
batch_size = 16  # samples per mini-batch

# Encoder section
# encoder: simply learns a representation of the source sentence
class Encoder(nn.Module):
    """Seq2seq encoder: embeds source-token ids and runs them through an LSTM.

    Returns the per-token LSTM outputs; the final (h, c) states are
    discarded here (only the output sequence is used downstream).
    """

    def __init__(self, vocab_size=None, emb_dim=None, hid_dim=None, n_layers=None):
        """Build the embedding + LSTM stack.

        All arguments default to the module-level hyperparameters
        (``en_vocab_size``, ``embedding_size``, ``hidden_size``,
        ``num_encoder_lstm_layers``), so ``Encoder()`` behaves exactly as
        before; pass explicit values to reuse the class with other sizes.
        """
        super(Encoder, self).__init__()
        vocab_size = en_vocab_size if vocab_size is None else vocab_size
        emb_dim = embedding_size if emb_dim is None else emb_dim
        hid_dim = hidden_size if hid_dim is None else hid_dim
        n_layers = num_encoder_lstm_layers if n_layers is None else n_layers
        self.emb = nn.Embedding(vocab_size, emb_dim)
        # batch_first=True: tensors are laid out (batch, seq_len, feature)
        self.lstm = nn.LSTM(emb_dim, hid_dim, n_layers, batch_first=True)

    def forward(self, x):
        """Encode a batch of token-id sequences.

        x: LongTensor of shape (batch, seq_len).
        Returns: FloatTensor of shape (batch, seq_len, hid_dim).
        """
        x = self.emb(x)
        h, _ = self.lstm(x)  # final (h_n, c_n) intentionally unused
        return h
