import logging
import oneflow as torch
import oneflow.nn as nn
from oasr.module.pos import MixedPositionalEncoding
from oasr.decoder.transformer import TransformerDecoderLayer


logger = logging.getLogger(__name__)


class BaseNonAutoregressiveDecoder(nn.Module):
    """Shared skeleton for non-autoregressive decoders.

    Embeds (or passes through) the target-side inputs, adds positional
    encoding, runs the result through ``self.blocks`` (populated by
    subclasses) with cross-attention over the encoder ``memory``, and
    projects to vocabulary logits.

    Args:
        d_model: model/hidden dimension of the decoder.
        vocab_size: size of the output vocabulary.
        relative_positional: stored flag; presumably consumed by the
            decoder layers a subclass installs — TODO confirm.
        share_embedding: if True, tie the input embedding weights to the
            output projection weights.
        vector_as_input: if True, ``forward`` receives already-embedded
            tensors instead of token ids, and no embedding table is built.
        layer_type: 'transformer' selects the 5-argument block call in
            ``forward``; any other value also passes ``conv_mask``.
    """

    def __init__(self, d_model, vocab_size, relative_positional=False,
                 share_embedding=False, vector_as_input=False,
                 layer_type='transformer'):
        super().__init__()

        self.layer_type = layer_type
        self.relative_positional = relative_positional
        # When True, `forward` treats `inputs` as pre-embedded tensors.
        self.vector_as_input = vector_as_input

        self.d_model = d_model

        if not self.vector_as_input:
            self.embedding = nn.Embedding(vocab_size, d_model)

        self.pos_encoding = MixedPositionalEncoding(d_model)

        # Subclasses must replace this with an nn.ModuleList of decoder layers.
        self.blocks = None

        self.output_layer = nn.Linear(d_model, vocab_size)

        if share_embedding and not vector_as_input:
            # Weight tying requires identical shapes. Raise instead of
            # `assert` so the check survives `python -O`.
            if self.embedding.weight.size() != self.output_layer.weight.size():
                raise ValueError(
                    'Cannot tie weights: embedding %s vs output layer %s'
                    % (tuple(self.embedding.weight.size()),
                       tuple(self.output_layer.weight.size()))
                )
            self.output_layer.weight = self.embedding.weight
            logger.info('Tie the weights between the embedding and output layer.')

    def forward(self, inputs, memory, memory_mask, dec_mask=None, conv_mask=None):
        """Decode all positions in parallel.

        Args:
            inputs: token ids, or pre-embedded tensors when
                ``vector_as_input`` is True.
            memory: encoder output attended to by every block.
            memory_mask: encoder padding mask; unsqueezed on dim 1 before
                being handed to each block.
            dec_mask: optional decoder self-attention mask.
            conv_mask: optional mask, forwarded only to non-'transformer'
                layer types.

        Returns:
            (logits, attn_weights): vocabulary logits and a dict mapping
            'dec_block_<i>' to that block's attention weights.
        """
        if not self.vector_as_input:
            # random mask input targets with mask token
            dec_output = self.embedding(inputs)
        else:
            dec_output = inputs

        dec_output, pos = self.pos_encoding(dec_output)

        attn_weights = {}
        for i, block in enumerate(self.blocks):
            if self.layer_type == 'transformer':
                dec_output, attn_weight = block(dec_output, dec_mask, memory, memory_mask.unsqueeze(1), pos)
            else:
                dec_output, attn_weight = block(dec_output, dec_mask, memory, memory_mask.unsqueeze(1), pos, conv_mask)
            attn_weights['dec_block_%d' % i] = attn_weight

        logits = self.output_layer(dec_output)

        return logits, attn_weights

 
class NonAutoregressiveTransformerDecoder(BaseNonAutoregressiveDecoder):
    """Non-autoregressive decoder built from a stack of TransformerDecoderLayer blocks."""

    def __init__(self, vocab_size, d_model, d_ff, n_heads, memory_dim, n_blocks, slf_attn_dropout=0.0, src_attn_dropout=0.0, ffn_dropout=0.0,
                 residual_dropout=0.1, relative_positional=False, activation='glu', share_embedding=False, vector_as_input=False):
        super().__init__(d_model, vocab_size, relative_positional, share_embedding, vector_as_input, layer_type='transformer')

        def make_layer():
            # One self-attention + cross-attention decoder layer; all layers share hyperparameters.
            return TransformerDecoderLayer(
                n_heads, d_model, d_ff, memory_dim,
                slf_attn_dropout=slf_attn_dropout,
                src_attn_dropout=src_attn_dropout,
                ffn_dropout=ffn_dropout,
                residual_dropout=residual_dropout,
                relative_positional=relative_positional,
                activation=activation,
            )

        self.blocks = nn.ModuleList([make_layer() for _ in range(n_blocks)])

    def forward(self, inputs, memory, memory_mask, dec_mask=None):
        """Delegate to the shared base-class decoding loop (no conv_mask for transformer layers)."""
        return super().forward(inputs, memory, memory_mask, dec_mask)


# Registry mapping a decoder type name to its implementing class.
NonAutoregressiveDecoder = dict(
    transformer=NonAutoregressiveTransformerDecoder,
)