import logging
import oneflow as torch
import random
import oneflow.nn as nn
import oneflow.nn.functional as F
from oasr.encoder.base import BaseEncoder
from oasr.module.pos import PositionalEncoding
from oasr.module.mha import MultiHeadedSelfAttention, MultiHeadedSelfAttentionWithRelPos
from oasr.module.ffn import PositionwiseFeedForward
from oasr.encoder.utils import get_streaming_frame_mask
from oasr.module.sampling import SumOrMeanDownSampling, Conv1dDownSampling


logger = logging.getLogger(__name__)


class TransformerEncoderLayer(nn.Module):
    """One Transformer encoder block: multi-head self-attention followed by a
    position-wise feed-forward network, each wrapped in a residual connection
    with LayerNorm and dropout.

    Args:
        n_heads: number of attention heads.
        d_model: model (hidden) dimension.
        d_ff: inner dimension of the feed-forward network.
        slf_attn_dropout: dropout rate inside the self-attention module.
        ffn_dropout: dropout rate inside the feed-forward module.
        residual_dropout: dropout applied to each sub-layer output (training
            forward path only).
        normalize_before: if True, LayerNorm is applied before each sub-layer
            (pre-norm); otherwise after the residual addition (post-norm).
        concat_after: if True, concatenate the attention input and output and
            project back to d_model instead of dropout on the attention output.
        relative_positional: if True, use relative-position self-attention.
        activation: activation name for the feed-forward network.

    NOTE(review): in the pre-norm path the residual is taken *after* norm1 /
    norm2 (the shortcut carries the normalized tensor). That differs from the
    standard pre-LN layout but is consistent across this file — confirm it is
    intentional before changing it (trained checkpoints depend on it).
    """

    def __init__(self, n_heads, d_model, d_ff, slf_attn_dropout, ffn_dropout, residual_dropout,
                 normalize_before=False, concat_after=False, relative_positional=False, activation='relu'):
        super(TransformerEncoderLayer, self).__init__()

        self.relative_positional = relative_positional

        # Pick the attention flavour; both take (n_heads, d_model, dropout).
        attn_cls = (MultiHeadedSelfAttentionWithRelPos
                    if relative_positional else MultiHeadedSelfAttention)
        self.slf_attn = attn_cls(n_heads, d_model, slf_attn_dropout)
        self.feed_forward = PositionwiseFeedForward(d_model, d_ff, ffn_dropout, activation)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)

        self.dropout1 = nn.Dropout(residual_dropout)
        self.dropout2 = nn.Dropout(residual_dropout)

        self.normalize_before = normalize_before
        self.concat_after = concat_after

        if self.concat_after:
            self.concat_linear = nn.Linear(d_model * 2, d_model)

    def forward(self, x, mask, pos=None):
        """Run the block.

        Args:
            x: input features (presumably [B, T, d_model] — confirm upstream).
            mask: attention mask forwarded to the self-attention module.
            pos: positional term, required when relative_positional is True.

        Returns:
            (output, {'slf_attn_weights': attention_weights})
        """
        # --- self-attention sub-layer ---
        if self.normalize_before:
            x = self.norm1(x)
        shortcut = x

        if self.relative_positional:
            attn_out, attn_w = self.slf_attn(x, mask, pos)
        else:
            attn_out, attn_w = self.slf_attn(x, mask)

        if self.concat_after:
            fused = torch.cat((x, attn_out), dim=-1)
            x = shortcut + self.concat_linear(fused)
        else:
            x = shortcut + self.dropout1(attn_out)
        if not self.normalize_before:
            x = self.norm1(x)

        # --- feed-forward sub-layer ---
        if self.normalize_before:
            x = self.norm2(x)
        x = x + self.dropout2(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm2(x)

        return x, {'slf_attn_weights': attn_w}

    def inference(self, x, mask, pos=None, cache=None):
        """Cached single-step/streaming variant of forward.

        Mirrors forward but calls `slf_attn.inference` with `cache` and skips
        the residual-dropout modules (a no-op in eval mode anyway).

        Returns:
            (output, new_cache, {'slf_attn_weights': attention_weights})
        """
        if self.normalize_before:
            x = self.norm1(x)
        shortcut = x

        if self.relative_positional:
            attn_out, attn_w, new_cache = self.slf_attn.inference(x, mask, pos, cache)
        else:
            attn_out, attn_w, new_cache = self.slf_attn.inference(x, mask, cache)

        if self.concat_after:
            x = shortcut + self.concat_linear(torch.cat((x, attn_out), dim=-1))
        else:
            x = shortcut + attn_out
        if not self.normalize_before:
            x = self.norm1(x)

        if self.normalize_before:
            x = self.norm2(x)
        x = x + self.feed_forward(x)
        if not self.normalize_before:
            x = self.norm2(x)

        return x, new_cache, {'slf_attn_weights': attn_w}


class TransformerEncoder(BaseEncoder):
    """Stack of TransformerEncoderLayer blocks with absolute or relative
    positional encoding.

    Args mirror TransformerEncoderLayer; `n_blocks` layers are stacked and,
    in the pre-norm configuration, a final LayerNorm is applied.
    """

    def __init__(self, d_model=256, n_heads=4, d_ff=2048, n_blocks=6, pos_dropout=0.0,
                 slf_attn_dropout=0.0, ffn_dropout=0.0, residual_dropout=0.1, normalize_before=False,
                 concat_after=False, relative_positional=False, activation='relu'):
        super(TransformerEncoder, self).__init__()

        self.normalize_before = normalize_before
        self.relative_positional = relative_positional
        self.output_size = d_model

        self.pos_emb = PositionalEncoding(d_model, pos_dropout)

        layers = [
            TransformerEncoderLayer(
                n_heads, d_model, d_ff, slf_attn_dropout, ffn_dropout,
                residual_dropout=residual_dropout, normalize_before=normalize_before,
                concat_after=concat_after, relative_positional=relative_positional,
                activation=activation)
            for _ in range(n_blocks)
        ]
        self.blocks = nn.ModuleList(layers)

        if self.normalize_before:
            self.norm = nn.LayerNorm(d_model)

    def _pos_encoding(self, inputs):
        """Return (encoder_input, positional_term) for the layer stack."""
        if not self.relative_positional:
            # Absolute encoding: pos_emb adds the encoding to the input.
            return self.pos_emb(inputs)
        # Relative positions span [-(T-1), T-1] -> embedding of shape [1, 2T-1, d].
        t = inputs.size(1)
        position = torch.arange(-(t - 1), t, device=inputs.device).reshape(1, -1)
        return inputs, self.pos_emb._embedding_from_positions(position)

    def forward(self, inputs, mask):
        """Encode a padded batch.

        Args:
            inputs: input features (presumably [B, T, d_model] — confirm upstream).
            mask: padding mask, unsqueezed to [B, 1, T] for attention.

        Returns:
            (enc_output, mask, attn_weights) with per-layer attention weights
            keyed 'enc_block_%d'.
        """
        enc_output, pos = self._pos_encoding(inputs)

        attn_weights = {}
        for idx, layer in enumerate(self.blocks):
            enc_output, weights = layer(enc_output, mask.unsqueeze(1), pos)
            attn_weights['enc_block_%d' % idx] = weights

        if self.normalize_before:
            enc_output = self.norm(enc_output)

        return enc_output, mask, attn_weights

    def inference(self, inputs, mask, cache=None):
        """Cached inference pass; returns per-layer caches as well.

        NOTE(review): the same `cache` object is handed to every layer while
        the freshly produced per-layer caches are only collected — verify with
        the caller that this is the intended cache protocol.
        """
        enc_output, pos = self._pos_encoding(inputs)

        attn_weights = {}
        new_caches = []
        for idx, layer in enumerate(self.blocks):
            enc_output, layer_cache, weights = layer.inference(enc_output, mask.unsqueeze(1), pos, cache)
            attn_weights['enc_block_%d' % idx] = weights
            new_caches.append(layer_cache)

        if self.normalize_before:
            enc_output = self.norm(enc_output)

        return enc_output, mask, new_caches, attn_weights


class StreamingTransformerEncoder(BaseEncoder):
    """Transformer encoder with a restricted (streaming) attention context.

    Each layer attends over a window of `left_context` / `right_context`
    frames; each may be a single int shared by all layers or a per-layer list
    of length `n_blocks`. With `dynamic_attn_context`, during training the
    local mask is applied only ~50% of the steps. An optional depthwise
    "lookahead" convolution with `lookahead_steps + 1` taps is applied to the
    final encoder output.
    """

    def __init__(self, d_model=256, n_heads=4, d_ff=2048, n_blocks=6,
                 left_context=5, right_context=0, pos_dropout=0.0, slf_attn_dropout=0.0,
                 ffn_dropout=0.0, residual_dropout=0.1, normalize_before=True, concat_after=False,
                 dynamic_attn_context=False, activation='relu', relative_positional=False, lookahead_steps=0):
        super(StreamingTransformerEncoder, self).__init__()

        self.left_context = left_context
        self.right_context = right_context
        self.output_size = d_model
        self.dynamic_attn_context = dynamic_attn_context
        self.relative_positional = relative_positional
        if self.dynamic_attn_context: logger.info('[StreamingTransformerEncoder] Dynamically adjust the context of MHA')

        # Per-layer context lists must provide one value per block.
        if isinstance(self.left_context, list): assert len(self.left_context) == n_blocks
        if isinstance(self.right_context, list): assert len(self.right_context) == n_blocks

        self.normalize_before = normalize_before
        self.pos_emb = PositionalEncoding(d_model, pos_dropout)

        self.blocks = nn.ModuleList([
            TransformerEncoderLayer(
                n_heads, d_model, d_ff, slf_attn_dropout, ffn_dropout, residual_dropout,
                normalize_before=normalize_before, concat_after=concat_after, relative_positional=relative_positional,
                activation=activation) for _ in range(n_blocks)
        ])

        if self.normalize_before:
            self.after_norm = nn.LayerNorm(d_model)

        self.lookahead_steps = lookahead_steps
        if self.lookahead_steps > 0:
            self.apply_look_ahead = True
            # Depthwise conv over time; the input is right-padded by
            # lookahead_steps so the output length stays T.
            self.lookahead_conv = nn.Conv1d(
                    in_channels=d_model,
                    out_channels=d_model,
                    kernel_size=self.lookahead_steps + 1,
                    padding=0, stride=1, bias=False,
                    groups=d_model)
            logger.info('Apply Lookahead Step in Encoder And Set it to %d' % lookahead_steps)
        else:
            self.apply_look_ahead = False

    def _pos_encoding(self, inputs):
        """Return (encoder_input, positional_term) for the layer stack."""
        if self.relative_positional:
            enc_output = inputs
            # Relative positions span [-(T-1), T-1] -> [1, 2T - 1]
            position = torch.arange(-(inputs.size(1)-1), inputs.size(1), device=inputs.device).reshape(1, -1)
            pos = self.pos_emb._embedding_from_positions(position)
        else:
            enc_output, pos = self.pos_emb(inputs)
        return enc_output, pos

    def forward(self, inputs, inputs_mask):
        """Encode a padded batch with (possibly dynamic) local attention.

        Args:
            inputs: input features (presumably [B, T, d_model] — confirm upstream).
            inputs_mask: padding mask (presumably [B, T] boolean — confirm).

        Returns:
            (enc_output, inputs_mask, attn_weights).
        """
        enc_output, pos = self._pos_encoding(inputs)

        length = torch.sum(inputs_mask, dim=-1)

        # With dynamic context, restrict attention only on ~half the training
        # steps; otherwise the local mask is always applied.
        apply_local_mask = random.random() < 0.5 if self.dynamic_attn_context and self.training else True

        attn_weights = {}
        for n, block in enumerate(self.blocks):

            if apply_local_mask:
                # Presumably a [B, T, T] frame-level local attention mask —
                # see get_streaming_frame_mask.
                mask = get_streaming_frame_mask(
                    enc_output, length,
                    left_context=self.left_context[n] if isinstance(self.left_context, list) else self.left_context,
                    right_context=self.right_context[n] if isinstance(self.right_context, list) else self.right_context)
            else:
                mask = inputs_mask.unsqueeze(1)

            enc_output, attn_weight = block(enc_output, mask, pos)
            attn_weights['enc_block_%d' % n] = attn_weight

        if self.normalize_before:
            enc_output = self.after_norm(enc_output)

        if self.apply_look_ahead:
            if self.lookahead_steps > 0:
                enc_output = F.pad(enc_output, pad=(0, 0, 0, self.lookahead_steps), value=0.0)
            enc_output = enc_output.transpose(1, 2)
            enc_output = self.lookahead_conv(enc_output)
            enc_output = enc_output.transpose(1, 2)

        return enc_output, inputs_mask, attn_weights

    def inference(self, inputs, inputs_mask, cache=None, is_streaming=False):
        """Cached inference pass; local masks are used when is_streaming.

        NOTE(review): the same `cache` object is handed to every layer while
        per-layer caches are only collected — verify the cache protocol with
        the caller.
        """
        enc_output, pos = self._pos_encoding(inputs)

        length = torch.sum(inputs_mask, dim=-1)

        attn_weights = {}
        new_caches = []
        for n, block in enumerate(self.blocks):

            if is_streaming:
                mask = get_streaming_frame_mask(
                    enc_output, length,
                    left_context=self.left_context[n] if isinstance(self.left_context, list) else self.left_context,
                    right_context=self.right_context[n] if isinstance(self.right_context, list) else self.right_context)
            else:
                mask = inputs_mask.unsqueeze(1)

            enc_output, new_cache, attn_weight = block.inference(enc_output, mask, pos, cache)
            attn_weights['enc_block_%d' % n] = attn_weight
            new_caches.append(new_cache)

        if self.normalize_before:
            enc_output = self.after_norm(enc_output)

        if self.apply_look_ahead:
            if self.lookahead_steps > 0:
                enc_output = F.pad(enc_output, pad=(0, 0, 0, self.lookahead_steps), value=0.0)
            enc_output = enc_output.transpose(1, 2)
            enc_output = self.lookahead_conv(enc_output)
            enc_output = enc_output.transpose(1, 2)

        # BUGFIX: return the padding mask (as forward does), not the loop
        # variable `mask`, which was the *last layer's attention mask* — a
        # frame-level [B, T, T] mask when is_streaming=True — and which was
        # undefined for an empty block list.
        return enc_output, inputs_mask, new_caches, attn_weights


class PyramidTransformerEncoder(BaseEncoder):
    """Transformer encoder that progressively downsamples the time axis.

    The `n_blocks` layers are split into `n_downsampling_layers` equal groups;
    after every group of `pds = n_blocks // n_downsampling_layers` layers the
    sequence is downsampled by a factor of 2 (mean pooling or strided conv),
    so the output length is T / 2**n_downsampling_layers.
    """

    def __init__(self, d_model=256, n_heads=4, d_ff=2048, n_blocks=6, pos_dropout=0.0,
                 slf_attn_dropout=0.0, ffn_dropout=0.0, residual_dropout=0.1, normalize_before=False,
                 concat_after=False, relative_positional=False, activation='relu', n_downsampling_layers=2, downsampling_type='conv'):
        super(PyramidTransformerEncoder, self).__init__()

        self.normalize_before = normalize_before
        self.relative_positional = relative_positional
        self.output_size = d_model
        self.n_downsampling_layers = n_downsampling_layers
        self.downsampling_type = downsampling_type
        assert n_blocks % n_downsampling_layers == 0
        # Number of encoder layers between two consecutive downsampling steps.
        self.pds = n_blocks // n_downsampling_layers

        self.pos_emb = PositionalEncoding(d_model, pos_dropout)

        self.blocks = nn.ModuleList([
            TransformerEncoderLayer(
                n_heads, d_model, d_ff, slf_attn_dropout, ffn_dropout,
                residual_dropout=residual_dropout, normalize_before=normalize_before,
                concat_after=concat_after, relative_positional=relative_positional, activation=activation) for _ in range(n_blocks)
        ])

        # One downsampling module per downsampling step.
        # BUGFIX: this list used to hold `n_blocks // n_downsampling_layers`
        # entries while forward/inference indexed it with `(i+1) // pds`
        # (starting at 1): entry 0 was never used, and the index ran past the
        # end of the list (IndexError) whenever n_downsampling_layers >= pds
        # (e.g. n_blocks=6, n_downsampling_layers=3). It now holds exactly
        # `n_downsampling_layers` entries indexed from 0.
        # NOTE: this changes state-dict keys for previously trained models.
        self.downsampling_blocks = nn.ModuleList(
            [
                SumOrMeanDownSampling(2) if self.downsampling_type == 'mean' else Conv1dDownSampling(3, d_model, 2) for _ in range(n_downsampling_layers)
            ]
        )
        logger.info('[PyramidTransformerEncoder] Apply %d %s downsampling layers.' % (n_downsampling_layers, downsampling_type))

        if self.normalize_before:
            self.norm = nn.LayerNorm(d_model)

    def _pos_encoding(self, inputs):
        """Return (encoder_input, positional_term) for the layer stack."""
        if self.relative_positional:
            enc_output = inputs
            # Relative positions span [-(T-1), T-1] -> [1, 2T - 1]
            position = torch.arange(-(inputs.size(1)-1), inputs.size(1), device=inputs.device).reshape(1, -1)
            pos = self.pos_emb._embedding_from_positions(position)
        else:
            enc_output, pos = self.pos_emb(inputs)
        return enc_output, pos

    def forward(self, inputs, mask):
        """Encode a padded batch, downsampling after each group of layers.

        Returns:
            (enc_output, mask, attn_weights) — mask is the downsampled
            padding mask with the attention dim squeezed back out.
        """
        enc_output, pos = self._pos_encoding(inputs)

        attn_weights = {}
        mask = mask.unsqueeze(1)  # [B, T] -> [B, 1, T] for attention
        for i, block in enumerate(self.blocks):
            enc_output, attn_weight = block(enc_output, mask, pos)
            attn_weights['enc_block_%d' % i] = attn_weight

            if (i + 1) % self.pds == 0:
                # BUGFIX: 0-based downsampling index (was `(i+1) // self.pds`).
                enc_output, mask = self.downsampling_blocks[(i + 1) // self.pds - 1](enc_output, mask)

        if self.normalize_before:
            enc_output = self.norm(enc_output)

        return enc_output, mask.squeeze(1), attn_weights

    def inference(self, inputs, mask, cache=None):
        """Cached inference pass mirroring forward.

        NOTE(review): the same `cache` object is handed to every layer while
        per-layer caches are only collected — verify the cache protocol.
        """
        enc_output, pos = self._pos_encoding(inputs)

        attn_weights = {}
        new_caches = []
        mask = mask.unsqueeze(1)
        for i, block in enumerate(self.blocks):
            enc_output, new_cache, attn_weight = block.inference(enc_output, mask, pos, cache)
            attn_weights['enc_block_%d' % i] = attn_weight
            new_caches.append(new_cache)

            if (i + 1) % self.pds == 0:
                # BUGFIX: 0-based downsampling index (was `(i+1) // self.pds`).
                enc_output, mask = self.downsampling_blocks[(i + 1) // self.pds - 1](enc_output, mask)

        if self.normalize_before:
            enc_output = self.norm(enc_output)

        return enc_output, mask.squeeze(1), new_caches, attn_weights


class PyramidStreamingTransformerEncoder(BaseEncoder):
    """Streaming (local-attention) Transformer encoder with time downsampling.

    Combines StreamingTransformerEncoder's restricted attention context with
    PyramidTransformerEncoder's factor-2 downsampling after every group of
    `pds = n_blocks // n_downsampling_layers` layers, plus an optional
    depthwise lookahead convolution on the final output.
    """

    def __init__(self, d_model=256, n_heads=4, d_ff=2048, n_blocks=6,
                 left_context=5, right_context=0, pos_dropout=0.0, slf_attn_dropout=0.0,
                 ffn_dropout=0.0, residual_dropout=0.1, normalize_before=True, concat_after=False,
                 dynamic_attn_context=False, activation='relu', relative_positional=False, lookahead_steps=0,
                 n_downsampling_layers=2, downsampling_type='conv'):
        super(PyramidStreamingTransformerEncoder, self).__init__()

        self.left_context = left_context
        self.right_context = right_context
        self.output_size = d_model
        self.dynamic_attn_context = dynamic_attn_context
        self.relative_positional = relative_positional
        if self.dynamic_attn_context: logger.info('[StreamingTransformerEncoder] Dynamically adjust the context of MHA')
        self.n_downsampling_layers = n_downsampling_layers
        self.downsampling_type = downsampling_type
        assert n_blocks % n_downsampling_layers == 0
        # Number of encoder layers between two consecutive downsampling steps.
        self.pds = n_blocks // n_downsampling_layers

        # Per-layer context lists must provide one value per block.
        if isinstance(self.left_context, list): assert len(self.left_context) == n_blocks
        if isinstance(self.right_context, list): assert len(self.right_context) == n_blocks

        self.normalize_before = normalize_before
        self.pos_emb = PositionalEncoding(d_model, pos_dropout)

        self.blocks = nn.ModuleList([
            TransformerEncoderLayer(
                n_heads, d_model, d_ff, slf_attn_dropout, ffn_dropout, residual_dropout,
                normalize_before=normalize_before, concat_after=concat_after, relative_positional=relative_positional,
                activation=activation) for _ in range(n_blocks)
        ])

        # One downsampling module per downsampling step.
        # BUGFIX: this list used to hold `n_blocks // n_downsampling_layers`
        # entries while forward/inference indexed it with `(n+1) // pds`
        # (starting at 1): entry 0 was never used, and the index ran past the
        # end of the list (IndexError) whenever n_downsampling_layers >= pds.
        # It now holds exactly `n_downsampling_layers` entries indexed from 0.
        # NOTE: this changes state-dict keys for previously trained models.
        self.downsampling_blocks = nn.ModuleList(
            [
                SumOrMeanDownSampling(2) if self.downsampling_type == 'mean' else Conv1dDownSampling(3, d_model, 2) for _ in range(n_downsampling_layers)
            ]
        )
        logger.info('[PyramidTransformerEncoder] Apply %d %s downsampling layers.' % (n_downsampling_layers, downsampling_type))

        if self.normalize_before:
            self.after_norm = nn.LayerNorm(d_model)

        self.lookahead_steps = lookahead_steps
        if self.lookahead_steps > 0:
            self.apply_look_ahead = True
            # Depthwise conv over time; the input is right-padded by
            # lookahead_steps so the output length is unchanged.
            self.lookahead_conv = nn.Conv1d(
                    in_channels=d_model,
                    out_channels=d_model,
                    kernel_size=self.lookahead_steps + 1,
                    padding=0, stride=1, bias=False,
                    groups=d_model)
            logger.info('Apply Lookahead Step in Encoder And Set it to %d' % lookahead_steps)
        else:
            self.apply_look_ahead = False

    def _pos_encoding(self, inputs):
        """Return (encoder_input, positional_term) for the layer stack."""
        if self.relative_positional:
            enc_output = inputs
            # Relative positions span [-(T-1), T-1] -> [1, 2T - 1]
            position = torch.arange(-(inputs.size(1)-1), inputs.size(1), device=inputs.device).reshape(1, -1)
            pos = self.pos_emb._embedding_from_positions(position)
        else:
            enc_output, pos = self.pos_emb(inputs)
        return enc_output, pos

    def forward(self, inputs, inputs_mask):
        """Encode a padded batch with local attention and downsampling.

        Returns:
            (enc_output, mask, attn_weights) — mask is the downsampled
            padding mask with the attention dim squeezed back out, matching
            PyramidTransformerEncoder.forward.
        """
        enc_output, pos = self._pos_encoding(inputs)

        # With dynamic context, restrict attention only on ~half the training
        # steps; otherwise the local mask is always applied.
        apply_local_mask = random.random() < 0.5 if self.dynamic_attn_context and self.training else True

        attn_weights = {}
        inputs_mask = inputs_mask.unsqueeze(1)  # [B, T] -> [B, 1, T]
        for n, block in enumerate(self.blocks):

            if apply_local_mask:
                # Lengths must be recomputed each group: downsampling shrinks
                # the mask along time.
                length = torch.sum(inputs_mask.squeeze(1), dim=-1)
                mask = get_streaming_frame_mask(
                    enc_output, length,
                    left_context=self.left_context[n] if isinstance(self.left_context, list) else self.left_context,
                    right_context=self.right_context[n] if isinstance(self.right_context, list) else self.right_context)
            else:
                mask = inputs_mask

            enc_output, attn_weight = block(enc_output, mask, pos)
            attn_weights['enc_block_%d' % n] = attn_weight

            if (n + 1) % self.pds == 0:
                # BUGFIX: 0-based downsampling index (was `(n+1) // self.pds`).
                enc_output, inputs_mask = self.downsampling_blocks[(n + 1) // self.pds - 1](enc_output, inputs_mask)

        if self.normalize_before:
            enc_output = self.after_norm(enc_output)

        if self.apply_look_ahead:
            enc_output = F.pad(enc_output, pad=(0, 0, 0, self.lookahead_steps), value=0.0)
            enc_output = enc_output.transpose(1, 2)
            enc_output = self.lookahead_conv(enc_output)
            enc_output = enc_output.transpose(1, 2)

        # BUGFIX: squeeze the attention dim out of the returned mask for
        # consistency with PyramidTransformerEncoder (was returned as [B, 1, T']).
        return enc_output, inputs_mask.squeeze(1), attn_weights

    def inference(self, inputs, inputs_mask, cache=None, is_streaming=False):
        """Cached inference pass mirroring forward.

        NOTE(review): the same `cache` object is handed to every layer while
        per-layer caches are only collected — verify the cache protocol.
        """
        enc_output, pos = self._pos_encoding(inputs)

        attn_weights = {}
        new_caches = []
        inputs_mask = inputs_mask.unsqueeze(1)
        for n, block in enumerate(self.blocks):
            if is_streaming:
                length = torch.sum(inputs_mask.squeeze(1), dim=-1)
                mask = get_streaming_frame_mask(
                    enc_output, length,
                    left_context=self.left_context[n] if isinstance(self.left_context, list) else self.left_context,
                    right_context=self.right_context[n] if isinstance(self.right_context, list) else self.right_context)
            else:
                mask = inputs_mask

            enc_output, new_cache, attn_weight = block.inference(enc_output, mask, pos, cache)
            attn_weights['enc_block_%d' % n] = attn_weight
            new_caches.append(new_cache)

            if (n + 1) % self.pds == 0:
                # BUGFIX: 0-based downsampling index (was `(n+1) // self.pds`).
                enc_output, inputs_mask = self.downsampling_blocks[(n + 1) // self.pds - 1](enc_output, inputs_mask)

        if self.normalize_before:
            enc_output = self.after_norm(enc_output)

        if self.apply_look_ahead:
            enc_output = F.pad(enc_output, pad=(0, 0, 0, self.lookahead_steps), value=0.0)
            enc_output = enc_output.transpose(1, 2)
            enc_output = self.lookahead_conv(enc_output)
            enc_output = enc_output.transpose(1, 2)

        # BUGFIX: return the current (downsampled) padding mask, not the loop
        # variable `mask` — which was stale (pre-downsampling) and, when
        # is_streaming=True, a frame-level [B, T, T] attention mask.
        return enc_output, inputs_mask.squeeze(1), new_caches, attn_weights