import logging
import oneflow as flow
import oneflow.nn as nn
from oasr.module.norm import LayerNorm
from oasr.module.pos import MixedPositionalEncoding
from oasr.module.front import ConvFrontEnd
from oasr.module.mha import MultiHeadedAttention, MultiHeadedSelfAttention
from oasr.module.ffn import PositionwiseFeedForward
from oasr.module.front import Conv2dSubsampling


logger = logging.getLogger(__name__)


class TransformerEncoderLayer(nn.Module):
    """One Transformer encoder block: multi-head self-attention followed by a
    position-wise feed-forward network, each wrapped in a residual connection.

    Supports both pre-norm (``normalize_before=True``) and post-norm layouts,
    plus an optional concat-then-project path for the attention output
    (``concat_after``).
    """

    def __init__(self, n_heads, d_model, d_ff, slf_attn_dropout_rate,
                 ffn_dropout_rate, residual_dropout_rate, normalize_before=False,
                 concat_after=False, activation='relu', apply_initialization=False):
        super(TransformerEncoderLayer, self).__init__()

        self.apply_initialization = apply_initialization
        self.normalize_before = normalize_before
        self.concat_after = concat_after

        # Sub-layer modules.
        self.self_attn = MultiHeadedAttention(n_heads, d_model, slf_attn_dropout_rate)
        self.feed_forward = PositionwiseFeedForward(d_model, d_ff, ffn_dropout_rate, activation)

        # One LayerNorm and one residual-dropout per sub-layer.
        self.norm1 = LayerNorm(d_model)
        self.norm2 = LayerNorm(d_model)
        self.dropout1 = nn.Dropout(residual_dropout_rate)
        self.dropout2 = nn.Dropout(residual_dropout_rate)

        if self.concat_after:
            # Projects the concatenation [x ; attn(x)] back to d_model.
            self.concat_linear = nn.Linear(d_model * 2, d_model)

        if self.apply_initialization:
            self.init_parameters()

    def forward(self, x, mask):
        """Compute encoded features.

        :param flow.Tensor x: encoded source features (batch, max_time_in, size)
        :param flow.Tensor mask: mask for x (batch, max_time_in)
        :rtype: Tuple[flow.Tensor, flow.Tensor]
        """
        # Self-attention sub-layer.
        residual = x
        y = self.norm1(x) if self.normalize_before else x
        attn_out = self.self_attn(y, y, y, mask)
        if self.concat_after:
            x = residual + self.concat_linear(flow.cat((y, attn_out), dim=-1))
        else:
            x = residual + self.dropout1(attn_out)
        if not self.normalize_before:
            x = self.norm1(x)

        # Position-wise feed-forward sub-layer.
        residual = x
        y = self.norm2(x) if self.normalize_before else x
        x = residual + self.dropout2(self.feed_forward(y))
        if not self.normalize_before:
            x = self.norm2(x)

        return x, mask

    def inference(self, x, mask, cache=None):
        """Single-step / cached variant of :meth:`forward` for decoding-time use.

        Same sub-layer structure, but attention goes through
        ``self_attn.inference_slf`` with an optional ``cache``.
        """
        # Self-attention sub-layer (cached path).
        residual = x
        y = self.norm1(x) if self.normalize_before else x
        attn_out = self.self_attn.inference_slf(y, y, y, mask, cache)
        if self.concat_after:
            x = residual + self.concat_linear(flow.cat((y, attn_out), dim=-1))
        else:
            x = residual + self.dropout1(attn_out)
        if not self.normalize_before:
            x = self.norm1(x)

        # Position-wise feed-forward sub-layer.
        residual = x
        y = self.norm2(x) if self.normalize_before else x
        x = residual + self.dropout2(self.feed_forward(y))
        if not self.normalize_before:
            x = self.norm2(x)

        return x, mask

    def init_parameters(self):
        # Delegate initialization to the sub-layer modules.
        self.self_attn.init_parameters()
        self.feed_forward.init_parameters()
        logger.debug('===== Initialize %s =====' % self.__class__.__name__)


class TransformerEncoder(nn.Module):
    """Transformer encoder: Conv2d subsampling front-end followed by a stack of
    ``TransformerEncoderLayer`` blocks.

    :param int input_size: dimension of the raw input features
    :param int d_model: model (attention) dimension
    :param int n_heads: number of attention heads per block
    :param int d_ff: inner dimension of the position-wise feed-forward layers
    :param int n_blocks: number of stacked encoder layers
    """

    def __init__(self, input_size, d_model=256, n_heads=4, d_ff=2048, n_blocks=6,
                 pos_dropout_rate=0.0, slf_attn_dropout_rate=0.0, ffn_dropout_rate=0.0, residual_dropout_rate=0.1,
                 normalize_before=False, concat_after=False, activation='relu', pos_scale_learnable=False,
                 apply_initialization=False):
        super(TransformerEncoder, self).__init__()

        self.apply_initialization = apply_initialization
        self.normalize_before = normalize_before

        # Subsampling front-end also builds the positional encoding.
        self.embed = Conv2dSubsampling(input_size, d_model, pos_dropout_rate, pos_scale_learnable=pos_scale_learnable)

        self.blocks = nn.ModuleList([
            TransformerEncoderLayer(
                n_heads, d_model, d_ff, slf_attn_dropout_rate, ffn_dropout_rate,
                residual_dropout_rate=residual_dropout_rate, normalize_before=normalize_before,
                concat_after=concat_after, activation=activation) for _ in range(n_blocks)
        ])

        if self.normalize_before:
            # Final LayerNorm required by the pre-norm layout.
            self.after_norm = LayerNorm(d_model)

        self.output_size = d_model

        if self.apply_initialization:
            self.init_parameters()

    def forward(self, inputs, inputs_mask):
        """Encode a padded batch of features.

        :param flow.Tensor inputs: input features (batch, time, input_size)
        :param flow.Tensor inputs_mask: padding mask for inputs
        :return: (encoded output, subsampled mask)
        """
        enc_output, enc_mask = self.embed(inputs, inputs_mask)

        # Zero out padded positions before the first attention block.
        enc_output = flow.masked_fill(enc_output, enc_mask.transpose(1, 2) == 0, 0.0)

        for block in self.blocks:
            enc_output, enc_mask = block(enc_output, enc_mask)

        if self.normalize_before:
            enc_output = self.after_norm(enc_output)

        return enc_output, enc_mask

    def init_parameters(self):
        self.embed.init_parameters()
        for block in self.blocks:
            block.init_parameters()
        # Bug fix: previous message said "TransformerDecoder"; report the actual
        # class name, consistent with TransformerEncoderLayer.init_parameters.
        logger.info('===== Initialized %s =====' % self.__class__.__name__)


class TransformerEncoderLayerV2(nn.Module):
    """Encoder block built on ``MultiHeadedSelfAttention`` (optionally with a
    shared Q/K/V projection) that also returns attention weights.

    NOTE(review): in the pre-norm path (``normalize_before=True``) the residual
    is captured AFTER LayerNorm (``residual = norm(x)``), unlike
    ``TransformerEncoderLayer`` above, where the residual is the un-normalized
    input. Confirm this ordering is intentional before relying on it.
    """

    def __init__(self, n_heads, d_model, d_ff, slf_attn_dropout_rate, ffn_dropout_rate, residual_dropout_rate,
                 normalize_before=False, concat_after=False, share_qvk_proj=False, activation='relu'):
        super(TransformerEncoderLayerV2, self).__init__()

        self.slf_attn = MultiHeadedSelfAttention(n_heads, d_model, slf_attn_dropout_rate, share_qvk_proj=share_qvk_proj)
        self.feed_forward = PositionwiseFeedForward(d_model, d_ff, ffn_dropout_rate, activation)

        # One LayerNorm and one residual-dropout per sub-layer.
        self.norm1 = LayerNorm(d_model)
        self.norm2 = LayerNorm(d_model)

        self.dropout1 = nn.Dropout(residual_dropout_rate)
        self.dropout2 = nn.Dropout(residual_dropout_rate)

        self.normalize_before = normalize_before
        self.concat_after = concat_after

        if self.concat_after:
            # Projects the concatenation [x ; attn(x)] back to d_model.
            self.concat_linear = nn.Linear(d_model * 2, d_model)

    def forward(self, x, mask):
        """Run one encoder block.

        :param flow.Tensor x: input features
        :param flow.Tensor mask: attention mask passed to self-attention
        :return: (output, dict with key 'slf_attn_weights')
        """
        # Self-attention sub-layer (see class NOTE about residual placement).
        if self.normalize_before:
            x = self.norm1(x)
        residual = x
        slf_attn_out, slf_attn_weights = self.slf_attn(x, mask)
        if self.concat_after:
            x = residual + self.concat_linear(flow.cat((x, slf_attn_out), dim=-1))
        else:
            x = residual + self.dropout1(slf_attn_out)
        if not self.normalize_before:
            x = self.norm1(x)

        # Position-wise feed-forward sub-layer.
        if self.normalize_before:
            x = self.norm2(x)
        residual = x
        x = residual + self.dropout2(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm2(x)

        return x, {'slf_attn_weights': slf_attn_weights}

    def inference(self, x, mask, cache=None):
        """Cached single-step variant of :meth:`forward`.

        Unlike forward(), the residual dropout modules are bypassed here
        (attention and FFN outputs are added directly) — presumably because
        this path only runs at eval time, where dropout would be a no-op.

        :return: (output, new attention cache, dict with 'slf_attn_weights')
        """
        if self.normalize_before:
            x = self.norm1(x)
        residual = x
        slf_attn_out, slf_attn_weights, new_cache = self.slf_attn.inference(x, mask, cache)
        if self.concat_after:
            x = residual + self.concat_linear(flow.cat((x, slf_attn_out), dim=-1))
        else:
            x = residual + slf_attn_out
        if not self.normalize_before:
            x = self.norm1(x)

        if self.normalize_before:
            x = self.norm2(x)
        residual = x
        x = residual + self.feed_forward(x)
        if not self.normalize_before:
            x = self.norm2(x)

        return x, new_cache, {'slf_attn_weights': slf_attn_weights}


class TransformerEncoderV2(nn.Module):
    """Transformer encoder built from ``TransformerEncoderLayerV2`` blocks.

    Unlike ``TransformerEncoder``, there is no convolutional subsampling:
    inputs only pass through a mixed positional encoding before the stacked
    self-attention blocks. Per-block attention weights are collected and
    returned alongside the output.
    """

    def __init__(self, d_model=256, n_heads=4, d_ff=2048, n_blocks=6, pos_dropout_rate=0.0,
                 slf_attn_dropout_rate=0.0, ffn_dropout_rate=0.0, residual_dropout_rate=0.1, normalize_before=False,
                 concat_after=False, share_qvk_proj=False, activation='relu', pos_scale_learnable=False):
        super(TransformerEncoderV2, self).__init__()

        self.normalize_before = normalize_before

        self.pos_emb = MixedPositionalEncoding(d_model, pos_dropout_rate, scale_learnable=pos_scale_learnable)

        layers = [
            TransformerEncoderLayerV2(
                n_heads, d_model, d_ff, slf_attn_dropout_rate, ffn_dropout_rate,
                residual_dropout_rate=residual_dropout_rate, normalize_before=normalize_before,
                concat_after=concat_after, share_qvk_proj=share_qvk_proj, activation=activation)
            for _ in range(n_blocks)
        ]
        self.blocks = nn.ModuleList(layers)

        if self.normalize_before:
            # Final LayerNorm required by the pre-norm layout.
            self.norm = LayerNorm(d_model)

    def forward(self, inputs, mask):
        """Encode a batch.

        :param flow.Tensor inputs: input features
        :param flow.Tensor mask: padding mask for inputs
        :return: (output, mask, dict of per-block attention weights)
        """
        out, _ = self.pos_emb(inputs)

        # Zero out padded positions before the first block.
        out = flow.masked_fill(out, mask.unsqueeze(2) == 0, 0.0)

        attn_weights = {}
        for idx, layer in enumerate(self.blocks):
            out, weights = layer(out, mask.unsqueeze(1))
            attn_weights['enc_block_%d' % idx] = weights

        if self.normalize_before:
            out = self.norm(out)

        return out, mask, attn_weights

    def inference(self, inputs, mask, cache=None):
        """Cached variant of :meth:`forward` for step-wise decoding.

        :return: (output, mask, list of per-block caches, dict of per-block
            attention weights)
        """
        out, _ = self.pos_emb.inference(inputs)

        # Zero out padded positions before the first block.
        out = flow.masked_fill(out, mask.unsqueeze(2) == 0, 0.0)

        attn_weights = {}
        new_caches = []
        for idx, layer in enumerate(self.blocks):
            out, layer_cache, weights = layer.inference(out, mask.unsqueeze(1), cache)
            attn_weights['enc_block_%d' % idx] = weights
            new_caches.append(layer_cache)

        if self.normalize_before:
            out = self.norm(out)

        return out, mask, new_caches, attn_weights

