from mindspore import nn


class Transformer(nn.Cell):
    """Encoder-decoder wrapper: runs both halves and flattens the decoder output."""

    def __init__(self, encoder, decoder):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder

    def construct(self, enc_inputs, dec_inputs, src_pad_idx, trg_pad_idx):
        """
        Full forward pass through encoder then decoder.

        Args:
            enc_inputs: [batch_size, S] source sequence (presumably token ids
                consumed by an embedding inside the encoder — confirm)
            dec_inputs: [batch_size, L] target sequence
            src_pad_idx: int, source-side padding id (used for masking)
            trg_pad_idx: int, target-side padding id (used for masking)
        Returns:
            dec_logits: [batch_size * L, trg_vocab_size]
            enc_self_attns, dec_self_attns, dec_enc_attns: attention maps
                passed through from the encoder/decoder.
        """
        enc_out, enc_self_attns = self.encoder(enc_inputs, src_pad_idx)
        dec_out, dec_self_attns, dec_enc_attns = self.decoder(
            dec_inputs, enc_inputs, enc_out, src_pad_idx, trg_pad_idx
        )

        # Collapse (batch, L, vocab) -> (batch * L, vocab) so each position
        # contributes one row of logits (handy for a flat cross-entropy loss).
        vocab_size = dec_out.shape[-1]
        dec_logits = dec_out.view((-1, vocab_size))

        return dec_logits, enc_self_attns, dec_self_attns, dec_enc_attns


if __name__ == '__main__':
    from mindspore import ops

    from transformers_network.decoder import Decoder
    from transformers_network.encoder import Encoder

    src_vocab_size = 600
    trg_vocab_size = 666
    src_pad_idx = 0
    trg_pad_idx = 0

    d_model = 512
    d_ff = 2048
    n_layers = 6
    n_heads = 8

    encoder = Encoder(src_vocab_size, d_model, n_heads, d_ff, n_layers, dropout_p=0.1)
    decoder = Decoder(trg_vocab_size, d_model, n_heads, d_ff, n_layers, dropout_p=0.1)
    model = Transformer(encoder, decoder)

    # BUG FIX: the model is built from vocab sizes, so its inputs are integer
    # token indices in [0, vocab_size) — `ops.randn` yields float tensors that
    # cannot be used for an embedding lookup (and make the `pad_idx == 0`
    # masking meaningless). Use random integer token ids instead.
    enc_inputs = ops.randint(0, src_vocab_size, (2, 10))
    dec_inputs = ops.randint(0, trg_vocab_size, (2, 15))

    dec_logits, enc_self_attns, dec_self_attns, dec_enc_attns = model(
        enc_inputs, dec_inputs, src_pad_idx, trg_pad_idx
    )
    print(dec_logits.shape)   # (2 * 15, 666)
    print(len(enc_self_attns))   # one attention map per encoder layer
    print(len(dec_self_attns))
    print(len(dec_enc_attns))