# -*- coding: utf-8 -*-

import torch.nn as nn
from transformer.module.decoder_layer import DecoderLayer
from transformer.module.positional_encoding import PositionalEncoding


class TransformerDecoder(nn.Module):
    """Stack of Transformer decoder layers with learned token embedding
    and sinusoidal positional encoding.

    The module embeds target token indices, adds positional encodings,
    and passes the result through ``n_layer`` decoder layers that attend
    over the encoder output ``src_enc``.
    """

    def __init__(self, v_size, d_model, n_head, n_layer, n_position,
                 dropout=0.1, embedding=None):
        """
        :param v_size:          target vocabulary size (rows of the embedding table)
        :param d_model:         model/hidden dimension
        :param n_head:          number of attention heads per decoder layer
        :param n_layer:         number of stacked decoder layers
        :param n_position:      the max length of sequence (positional table size)
        :param dropout:         dropout probability passed to each decoder layer
        :param embedding:       optional pre-built embedding module to share
                                (e.g. tied with the encoder); a fresh
                                ``nn.Embedding(v_size, d_model)`` is created when None
        """
        super(TransformerDecoder, self).__init__()
        self.d_model = d_model
        self.n_head = n_head
        self.n_layer = n_layer
        self.dropout = dropout

        # Reuse a caller-supplied embedding (weight sharing) or build our own.
        if embedding is not None:
            self.embedding = embedding
        else:
            self.embedding = nn.Embedding(v_size, d_model)
        self.positional_embedding = PositionalEncoding(d_model, n_position)

        # ModuleList registers each layer's parameters with this module.
        self.decoder_layers = nn.ModuleList(
            DecoderLayer(self.d_model, self.n_head, self.dropout)
            for _ in range(self.n_layer)
        )

    def forward(self, src_enc, tgt_idx, src_mask=None):
        """
        :param src_enc:         encoder output, shape (B, L, H)
        :param tgt_idx:         target token indices, shape (B, L')
        :param src_mask:        source attention mask, shape (B, 1, 1, L)
        :return:                decoder output, shape (B, L', H)
        """
        tgt_emb = self.embedding(tgt_idx)
        output = self.positional_embedding(tgt_emb)

        # Call each layer via __call__ (not .forward) so that any
        # registered forward/backward hooks are honored.
        for layer in self.decoder_layers:
            output = layer(src_enc, output, src_mask=src_mask)

        return output
