from torch import nn

from NMTDemo.NMTModel.NMTDecoder import NMTDecoder
from NMTDemo.NMTModel.NMTEncoder import NMTEncoder


class NMTModel(nn.Module):
    """Encoder-decoder neural machine translation model.

    Composes an ``NMTEncoder`` over the source sentence with an
    ``NMTDecoder`` that generates the target sentence conditioned on the
    encoder states.
    """

    def __init__(self, source_vocab_size, source_embedding_size,
                 target_vocab_size, target_embedding_size, encoding_size,
                 target_bos_index):
        """Build the encoder and decoder sub-modules.

        Args:
            source_vocab_size: number of words in the source vocabulary.
            source_embedding_size: dimensionality of source embeddings.
            target_vocab_size: number of words in the target vocabulary.
            target_embedding_size: dimensionality of target embeddings.
            encoding_size: hidden size of the encoder RNN.
            target_bos_index: index of the BOS token used to start decoding.
        """
        super().__init__()
        # Debug trace of the hyperparameters this model was built with.
        print(f"source_vocab_size={source_vocab_size},source_embedding_size={source_embedding_size},"
              f"target_vocab_size={target_vocab_size},target_embedding_size={target_embedding_size},encoding_size={encoding_size},"
              f"target_bos_index={target_bos_index}")
        self.encoder = NMTEncoder(num_embeddings=source_vocab_size,
                                  embedding_size=source_embedding_size,
                                  rnn_hidden_size=encoding_size)
        # Decoder hidden size is twice the encoder's — presumably because
        # the encoder RNN is bidirectional; confirm against NMTEncoder.
        decoder_hidden_size = encoding_size * 2
        self.decoder = NMTDecoder(num_embeddings=target_vocab_size,
                                  embedding_size=target_embedding_size,
                                  rnn_hidden_size=decoder_hidden_size,
                                  bos_index=target_bos_index)

    def forward(self, x_source, x_source_lengths, target_sequence):
        """Encode the source batch, then decode toward the target sequence.

        Args:
            x_source: batch of source token indices.
            x_source_lengths: true (unpadded) lengths of each source sequence.
            target_sequence: batch of target token indices fed to the decoder.

        Returns:
            The decoder's output states (e.g. per-step vocabulary scores).
        """
        encoder_states, encoder_hidden = self.encoder(x_source, x_source_lengths)
        return self.decoder(encoder_state=encoder_states,
                            initial_hidden_state=encoder_hidden,
                            target_sequence=target_sequence)