#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright 2019 Mobvoi Inc. All Rights Reserved.
"""Refactored pure encoder v2 which takes chunk mask as input"""
from typing import Tuple

import logging
import random
import torch
import torch.nn.functional as F
from typeguard import check_argument_types

from wenet.models.moe_comformer.attention import MultiHeadedAttention
from wenet.models.moe_comformer.attention import RelPositionMultiHeadedAttention
from wenet.models.moe_comformer.convolution import ConvolutionModule
from wenet.models.moe_comformer.encoder_layer import (TransformerEncoderLayer,
                                               ConformerEncoderLayer)
from wenet.models.moe_comformer.positionwise_feed_forward import init_ff
from wenet.models.moe_comformer.common import get_activation
from wenet.models.moe_comformer.epoch_based_activator import (simple_factory,
                                                       EpochBasedActivator)
from wenet.models.moe_comformer.mask import subsequent_chunk_mask2, make_pad_mask
from wenet.models.moe_comformer.common import mask_to_bias


class LeftSubsampler(torch.nn.Module):
    """Builds an auxiliary attention mask that sparsifies the left/history
    context of a chunked attention mask.

    The produced mask is combined (AND-ed) with a chunk mask by
    ``ChunkMasker``; positions marked ``True`` remain attendable.
    """

    def __init__(self, subsample_factor=1, mask_prob=0, prob=0, **kwargs):
        """
        Args:
            subsample_factor: keep only anti-diagonals of the history whose
                index is divisible by this factor (1 disables subsampling).
            mask_prob: strength of random history dropping; a history position
                is re-enabled with probability ``1 - mask_prob``
                (0 disables random masking).
            prob: activation config forwarded to ``EpochBasedActivator``.
        """
        super().__init__()
        self.subsample_factor = subsample_factor
        self.mask_prob = mask_prob
        self.prob = simple_factory(EpochBasedActivator, prob)
        self.reset_stat()

    def set_epoch(self, epoch: int):
        """Propagate the epoch to the activator and restart the statistics."""
        self.prob.set_epoch(epoch)
        self.reset_stat()

    def enabled(self) -> bool:
        """True when any masking is configured and the activator allows it."""
        return ((self.subsample_factor > 1 or self.mask_prob > 0)
                and self.prob.enabled())

    def get_subsample_factor(self) -> float:
        """Return the factor used for this batch and accumulate statistics."""
        factor = self.subsample_factor if self.prob.activated() else 1
        self.num_batches += 1
        self.accum_factor += factor
        return factor

    def forward(self, size, chunk_size, device=None) -> torch.Tensor:
        """Build a ``(size, size)`` boolean visibility mask.

        Returns the Python constant ``True`` (broadcast-compatible with ``&``)
        when masking is disabled, i.e. nothing is masked at all.
        """
        # Parentheses make the original operator precedence explicit:
        # (not enabled) or (training and not activated this step).
        if not self.enabled() or (self.training and not self.prob.activated()):
            return True  # not mask at all
        arange = torch.arange(size, device=device)
        ori = arange.expand(size, size)
        # Everything from the start of the row's current chunk onward stays
        # visible; only the history left of the chunk start is thinned below.
        # NOTE(review): chunk_size == 0 would make this floor-division
        # degenerate — confirm callers always pass a positive chunk size here.
        mask = ori >= (arange // chunk_size * chunk_size).unsqueeze(-1)
        if self.subsample_factor > 1:
            # Keep anti-diagonals (row + col) divisible by subsample_factor.
            mask |= (ori + arange.reshape(size, -1)) % self.subsample_factor == 0
        if self.mask_prob > 0:
            # Randomly re-enable positions with probability (1 - mask_prob).
            mask |= torch.rand(size, size, device=device) >= self.mask_prob
        return mask

    def reset_stat(self):
        """Reset the running subsample-factor statistics."""
        self.accum_factor = 0
        self.num_batches = 0

    def gather_stat_info(self, infos: list):
        """Append the average subsample factor to ``infos`` and reset stats.

        Skips reporting when no batch has been seen yet; previously this
        raised ZeroDivisionError if called right after ``reset_stat``.
        """
        if self.num_batches > 0:
            infos.append(
                f'avg_subsample={self.accum_factor / self.num_batches:.2f}')
        self.reset_stat()

    def __str__(self):
        return f'(subsample_factor={self.subsample_factor}, prob={self.prob})'


class ChunkMasker(torch.nn.Module):
    """Turns a padding mask into a (dynamic) chunk attention mask.

    During training, when the activator fires, a random chunk size (and
    optionally a random number of left chunks) is drawn per batch; an
    optional ``LeftSubsampler`` further thins the visible history.
    """

    def __init__(self, prob=0,
                 max_chunk_size: int = 25,
                 dynamic_left_chunks: bool = False,
                 left_subsampler_conf: dict = None):
        super().__init__()
        self.prob = simple_factory(EpochBasedActivator, prob)
        self.max_chunk_size = max_chunk_size
        self.dynamic_left_chunks = dynamic_left_chunks
        self.left_subsampler = (LeftSubsampler(**left_subsampler_conf)
                                if left_subsampler_conf else None)

    def set_epoch(self, epoch):
        # Only the optional left subsampler is epoch-aware.
        if not self.left_subsampler:
            return
        self.left_subsampler.set_epoch(epoch)
        if self.left_subsampler.enabled():
            logging.info(f'Set left subsampler to {self.left_subsampler}'
                         f' for epoch {epoch}.')

    def gather_stat_info(self, infos: list):
        sub = self.left_subsampler
        if sub and sub.enabled():
            sub.gather_stat_info(infos)

    def get_chunk_pair(self, max_len, chunk_len=0, left_chunks=-1):
        """Return (chunk size, left chunks); -1 left chunks means unlimited."""
        left_ctx = -1
        if self.training and self.prob.activated():
            chunk = random.randint(1, min(max_len // 2, self.max_chunk_size))
            if self.dynamic_left_chunks:
                left_ctx = random.randint(0, max_len // chunk - 1)
            return chunk, left_ctx
        if chunk_len > 0:
            return chunk_len, left_chunks
        return max_len, left_ctx

    def forward(self, masks, chunk_size=0, left_chunks=-1):
        """Combine ``masks`` with a (possibly random) chunk mask."""
        max_len = masks.size(-1)
        if self.training and self.prob.activated():
            # Draw a fresh chunk configuration for this batch.
            chunk_size = random.randint(
                1, min(max_len // 2, self.max_chunk_size))
            if self.dynamic_left_chunks:
                left_chunks = random.randint(0, max_len // chunk_size - 1)
        chunk_masks = subsequent_chunk_mask2(
            max_len, chunk_size, left_chunks, device=masks.device)
        if self.left_subsampler:
            # Thin the visible history in addition to the chunk layout.
            chunk_masks = chunk_masks & self.left_subsampler(
                max_len, chunk_size, device=masks.device)
        return masks & chunk_masks.unsqueeze(0)


class BaseEncoder(torch.nn.Module):
    """Pure encoder, without embedding layer.

    Subclasses are expected to populate ``self.encoders`` with the encoder
    layers; this base class owns only the final LayerNorm and the shared
    forward logic.
    """

    def __init__(
        self,
        output_size: int = 256,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.0,
        normalize_before: bool = True,
        concat_after: bool = False,
        use_chunk_mask: bool = False,
        use_sdpa: bool = False,
        **kwargs,
    ):
        """
        Args:
            output_size (int): dimension of attention
            attention_heads (int): the number of heads of multi head attention
            linear_units (int): the hidden units number of position-wise feed
                forward
            num_blocks (int): the number of encoder blocks
            dropout_rate (float): dropout rate
            attention_dropout_rate (float): dropout rate in attention
            normalize_before (bool):
                True: use layer_norm before each sub-block of a layer.
                False: use layer_norm after each sub-block of a layer.
            concat_after (bool): whether to concat attention layer's input
                and output.
                True: x -> x + linear(concat(x, att(x)))
                False: x -> x + att(x)
            use_chunk_mask (bool): whether use given chunk mask
            use_sdpa (bool): convert boolean masks to additive-bias masks
                (see ``mask_to_bias``) before feeding the layers.

        Note: only output_size, normalize_before, use_chunk_mask and use_sdpa
        are consumed here; the other arguments exist so subclasses share one
        signature.
        """
        assert check_argument_types()
        super().__init__()
        self._output_size = output_size
        self.normalize_before = normalize_before
        self.after_norm = torch.nn.LayerNorm(output_size, eps=1e-5)
        self.use_chunk_mask = use_chunk_mask
        self.use_sdpa = use_sdpa

    def output_size(self) -> int:
        """Model (attention) dimension of the encoder output."""
        return self._output_size

    def num_layers(self) -> int:
        """Number of encoder layers (``self.encoders`` set by subclasses)."""
        return len(self.encoders)

    def set_epoch(self, epoch):
        """Forward the current training epoch to every encoder layer."""
        for layer in self.encoders:
            layer.set_epoch(epoch)

    def gather_stat_info(self, infos: list):
        """Statistics hook; the base implementation collects nothing."""
        pass

    def get_debug_stat(self, infos: list, name=''):
        """Collect per-layer debug stats from layers that support it."""
        for i, layer in enumerate(self.encoders):
            if hasattr(layer, 'get_debug_stat'):
                layer.get_debug_stat(infos, f'{name}.{i}')

    def forward(
        self,
        xs: torch.Tensor,
        pos_emb: torch.Tensor,
        masks: torch.Tensor,
        chunk_masks: torch.Tensor = None,
        use_chunk_mask: bool = None
    ) -> torch.Tensor:
        """Run the full (non-streaming) encoder stack.

        Args:
            xs: input features; assumed (batch, time, output_size) — TODO
                confirm against the embedding front-end.
            pos_emb: positional embedding, passed through to every layer.
            masks: padding masks.
            chunk_masks: optional chunk attention masks; used only when
                ``use_chunk_mask`` resolves to True, otherwise ``masks``
                is used instead.
            use_chunk_mask: per-call override of ``self.use_chunk_mask``
                (None means use the module default).
        Returns:
            Encoded features, normalized by ``after_norm`` when
            ``normalize_before`` is set.
        """
        if use_chunk_mask is None:
            use_chunk_mask = self.use_chunk_mask
        if chunk_masks is None or not use_chunk_mask:
            chunk_masks = masks
        if self.use_sdpa:
            # SDPA expects an additive bias instead of a boolean mask.
            chunk_masks = mask_to_bias(chunk_masks, xs.dtype)
        for layer in self.encoders:
            # Layers may rewrite chunk_masks; thread it through the stack.
            xs, chunk_masks, _ = layer(xs, chunk_masks, pos_emb, masks)
        if self.normalize_before:
            xs = self.after_norm(xs)
        return xs

    def forward_incremental(
        self,
        xs: torch.Tensor,
        pos_emb: torch.Tensor,
        pad_mask: torch.Tensor,
        att_caches: torch.Tensor,
        att_lengths: torch.Tensor,
        cnn_caches: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Streaming forward pass with per-layer attention/CNN caches.

        NOTE(review): the annotation promises three tensors, but the cache
        outputs are returned as Python lists of per-layer tensors (each with
        a layer dim inserted) — callers presumably concatenate them; confirm.
        """
        # $$num_layers$$ is placed at dim-2 for the convenience of managing
        # attention caches outside in ORT.
        att_caches = att_caches.reshape(
            xs.size(0), -1, self.num_layers(), self.output_size())
        cnn_caches = cnn_caches.reshape(xs.size(0), self.num_layers(), -1)
        # attention history masks
        cache_mask = ~make_pad_mask(
            att_lengths, att_caches.size(1), reverse=True).unsqueeze(1)  # (B, 1, T1)
        # full attention masks: history followed by the current frames
        att_mask = torch.cat(
            (cache_mask.expand(-1, pad_mask.size(1), -1), pad_mask), dim=-1)
        r_att_caches = []
        r_cnn_caches = []
        if self.use_sdpa:
            att_mask = mask_to_bias(att_mask, xs.dtype)
        for i, layer in enumerate(self.encoders):
            xs, new_att_cache, new_cnn_cache = layer.forward_incremental(
                xs, pos_emb, att_mask, pad_mask,
                att_caches[:,:,i], cnn_caches[:,i])
            r_att_caches.append(new_att_cache.unsqueeze(2))
            r_cnn_caches.append(new_cnn_cache.unsqueeze(1))
        if self.normalize_before:
            xs = self.after_norm(xs)
        return xs, r_att_caches, r_cnn_caches


class TransformerEncoder(BaseEncoder):
    """Transformer encoder: stacked self-attention + feed-forward layers."""

    def __init__(
        self,
        output_size: int = 256,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.0,
        normalize_before: bool = True,
        concat_after: bool = False,
        use_positional_embedding: bool = False,
        activation_type: str = "swish",
        use_chunk_mask: bool = False,
        use_sdpa: bool = False,
        ff_moe_conf: dict = None,
        **kwargs,
    ):
        """Construct TransformerEncoder.

        See BaseEncoder for the meaning of each parameter.
        """
        assert check_argument_types()
        super().__init__(output_size, attention_heads, linear_units, num_blocks,
                         dropout_rate, attention_dropout_rate, normalize_before,
                         concat_after, use_chunk_mask, use_sdpa)
        # Relative-position attention only when positional embeddings are fed.
        attn_cls = (RelPositionMultiHeadedAttention if use_positional_embedding
                    else MultiHeadedAttention)
        act_fn = get_activation(activation_type)
        layers = []
        for _ in range(num_blocks):
            layers.append(TransformerEncoderLayer(
                output_size,
                attn_cls(attention_heads, output_size, attention_dropout_rate,
                         use_sdpa=use_sdpa),
                init_ff(output_size, linear_units, dropout_rate, act_fn,
                        moe_conf=ff_moe_conf),
                dropout_rate,
                normalize_before,
                concat_after,
            ))
        self.encoders = torch.nn.ModuleList(layers)


class ConformerEncoder(BaseEncoder):
    """Conformer encoder: attention + (macaron) feed-forward + conv blocks."""

    def __init__(
        self,
        output_size: int = 256,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.0,
        normalize_before: bool = True,
        concat_after: bool = False,
        use_positional_embedding: bool = False,
        positionwise_conv_kernel_size: int = 1,
        macaron_style: bool = True,
        selfattention_layer_type: str = "rel_selfattn",
        activation_type: str = "swish",
        use_cnn_module: bool = True,
        cnn_module_kernel: int = 15,
        causal: bool = False,
        cnn_module_norm: str = "batch_norm",
        use_chunk_mask: bool = False,
        use_sdpa: bool = False,
        ff_moe_conf: dict = None,
        **kwargs,
    ):
        """Construct ConformerEncoder.

        Args:
            output_size .. use_sdpa: see BaseEncoder.
            positionwise_conv_kernel_size (int): kernel size of the
                position-wise conv1d layer.
            macaron_style (bool): whether to add a second (macaron)
                feed-forward module around the attention block.
            selfattention_layer_type (str): kept only for configuration
                compatibility; it has no effect here.
            activation_type (str): encoder activation function type.
            use_cnn_module (bool): whether to insert a convolution module.
            cnn_module_kernel (int): kernel size of the convolution module.
            causal (bool): whether the convolution is causal.
        """
        assert check_argument_types()
        super().__init__(output_size, attention_heads, linear_units, num_blocks,
                         dropout_rate, attention_dropout_rate, normalize_before,
                         concat_after, use_chunk_mask, use_sdpa)
        act_fn = get_activation(activation_type)
        # Relative-position attention only when positional embeddings are fed.
        attn_cls = (RelPositionMultiHeadedAttention if use_positional_embedding
                    else MultiHeadedAttention)

        def make_layer():
            # One conformer block: attention, FF (plus macaron FF), optional
            # convolution module.
            return ConformerEncoderLayer(
                output_size,
                attn_cls(attention_heads, output_size, attention_dropout_rate,
                         use_sdpa=use_sdpa),
                init_ff(output_size, linear_units, dropout_rate, act_fn,
                        moe_conf=ff_moe_conf),
                init_ff(output_size, linear_units, dropout_rate, act_fn,
                        moe_conf=ff_moe_conf) if macaron_style else None,
                ConvolutionModule(output_size, cnn_module_kernel, act_fn,
                                  cnn_module_norm, causal)
                if use_cnn_module else None,
                dropout_rate,
                normalize_before,
                concat_after,
            )

        self.encoders = torch.nn.ModuleList(
            [make_layer() for _ in range(num_blocks)])


class MixtureOfEncodings(torch.nn.Module):
    """Gated blend of an encoding with a cross-attended source encoding."""

    def __init__(self, dim: int, **kwargs):
        # Head count is dim/64 — presumably dim is a multiple of 64; confirm.
        assert check_argument_types()
        super().__init__()
        self.cross_attn = RelPositionMultiHeadedAttention(int(dim / 64), dim, 0)
        self.fc = torch.nn.Linear(dim + dim, 2)

    def forward(self, xs, src, masks, pos_emb):
        """Return a per-position convex combination of ``xs`` and ``src``."""
        attended = self.cross_attn(xs, src, src, masks, pos_emb)
        # Two softmax-normalized gates from the concatenated features.
        gates = F.softmax(self.fc(torch.cat((xs, attended), dim=-1)), dim=-1)
        gate_self, gate_src = gates[:, :, :1], gates[:, :, 1:]
        return gate_self * xs + gate_src * src
