import logging

import oneflow as torch
import oneflow.nn as nn
import oneflow.nn.functional as F

from oasr.module.conv import Conv2dLayer, TwoLayerConv1dBlock, TwoLayerConv2dBlock
from oasr.module.initialization import initiailze_conv_params
# NOTE(review): ScaledPositionalEncoding was referenced but never imported
# (NameError when pos_scale_learnable=True); assumed to live alongside
# PositionalEncoding — confirm the module path.
from oasr.module.pos import PositionalEncoding, ScaledPositionalEncoding
from oasr.module.utils import get_length_mask

logger = logging.getLogger(__name__)


class LinearWithPosEmbedding(nn.Module):
    """Project input features to the model dimension and add positional encodings.

    :param int input_size: dimension of the incoming feature vectors
    :param int d_model: model (output) dimension
    :param float dropout_rate: dropout probability, also passed to the positional encoding
    """

    def __init__(self, input_size, d_model, dropout_rate=0.0):
        super(LinearWithPosEmbedding, self).__init__()
        self.linear = nn.Linear(input_size, d_model)
        self.dropout = nn.Dropout(dropout_rate)
        self.activation = nn.ReLU()
        self.pos_embedding = PositionalEncoding(d_model, dropout_rate)

    def forward(self, inputs, mask):
        """Encode inputs; the mask is returned unchanged."""
        projected = self.linear(inputs)
        activated = self.activation(self.dropout(projected))
        return self.pos_embedding(activated), mask


class Conv2dSubsampling(nn.Module):
    """Convolutional 2D subsampling (to 1/4 length).

    Two stride-2 convolutions shrink both the time and the feature axis,
    then a linear layer maps the flattened channel/feature dimensions to
    ``odim`` and positional encodings are added.

    :param int idim: input feature dimension
    :param int odim: output (model) dimension
    :param float dropout_rate: dropout rate of the positional encoding
    :param bool apply_initialization: re-initialize conv parameters (normal distribution)
    :param bool pos_scale_learnable: use ScaledPositionalEncoding (learnable scale)
    """

    def __init__(self, idim, odim, dropout_rate=0.0, apply_initialization=False, pos_scale_learnable=False):
        super(Conv2dSubsampling, self).__init__()

        self.apply_initialization = apply_initialization

        self.conv = nn.Sequential(
            nn.Conv2d(1, odim, 3, 2),
            nn.ReLU(),
            nn.Conv2d(odim, odim, 3, 2),
            nn.ReLU()
        )
        # Feature axis after two (kernel=3, stride=2, no padding) convs:
        # ((idim - 1) // 2 - 1) // 2 per channel.
        self.out = nn.Sequential(
            nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim),
            ScaledPositionalEncoding(odim, dropout_rate) if pos_scale_learnable else PositionalEncoding(odim, dropout_rate)
        )

        if pos_scale_learnable:
            logger.info('Apply scale learnable pos embedding')

        if self.apply_initialization:
            self.init_parameters()

    def forward(self, x, x_mask):
        """Subsample x

        :param torch.Tensor x: input tensor
        :param torch.Tensor x_mask: input mask (or None)
        :return: subsampled x and mask
        :rtype Tuple[torch.Tensor, torch.Tensor]
        """
        x = x.unsqueeze(1)  # (b, c, t, f)
        x = self.conv(x)
        b, c, t, f = x.size()
        x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))
        if x_mask is None:
            return x, None
        # Mirror the two stride-2 convs on the mask's time axis.
        return x, x_mask[:, :, :-2:2][:, :, :-2:2]

    def inference(self, x, start_step=0):
        """Run subsampling for streaming inference, offset by ``start_step``.

        :param torch.Tensor x: input of shape [1, time_step, feat_dim]
        """
        x = x.unsqueeze(1)  # (b, c, t, f)
        x = self.conv(x)
        b, c, t, f = x.size()
        # Index the Sequential directly instead of poking at _modules:
        # [0] is the Linear projection, [1] the positional encoding.
        x = self.out[0](x.transpose(1, 2).contiguous().view(b, t, c * f))
        x = self.out[1].inference_chunk(x, start_step=start_step)
        return x

    def init_parameters(self):
        """Re-initialize every parameter via the project's conv initializer."""
        for n, p in self.named_parameters():
            initiailze_conv_params(n, p)
        logger.info('===== Initialize %s with normal distribution =====' % self.__class__.__name__)


class ConvFrontWithLayernorm(nn.Module):
    """Convolutional subsampling front end with optional layer norm and pos embedding.

    :param int input_dim: input feature dimension
    :param int output_dim: output (model) dimension
    :param str conv_type: '1d' or '2d'
    :param list kernel_size: kernel sizes of the two conv layers
    :param list stride: strides of the two conv layers
    :param float conv_dropout_rate: dropout rate inside the conv block
    :param bool layer_norm: apply layer norm inside the conv block
    :param bool apply_pos_embedding: add positional encodings to the output
    :param float pos_dropout_rate: dropout rate of the positional encoding
    """

    def __init__(self, input_dim, output_dim, conv_type='2d', kernel_size=[3, 3], stride=[2, 2],
                 conv_dropout_rate=0.0, layer_norm=True, apply_pos_embedding=True, pos_dropout_rate=0.0):
        super(ConvFrontWithLayernorm, self).__init__()

        self.conv_type = conv_type
        self.kernel_size = kernel_size
        self.stride = stride
        self.layer_norm = layer_norm
        self.apply_pos_embedding = apply_pos_embedding
        self.output_dim = output_dim

        assert self.conv_type in ['1d', '2d']
        assert isinstance(self.kernel_size, list) and len(self.kernel_size) == 2
        assert isinstance(self.stride, list) and len(self.stride) == 2

        if self.conv_type == '2d':
            self.conv_block = TwoLayerConv2dBlock(
                input_dim=input_dim, in_channel=1, out_channel=64,
                kernel_size=self.kernel_size, stride=self.stride,
                dropout=conv_dropout_rate, apply_layer_norm=layer_norm, residual=False
            )
        else:
            self.conv_block = TwoLayerConv1dBlock(
                in_channel=input_dim, mid_channel=output_dim // 2, out_channel=output_dim,
                kernel_size=self.kernel_size, stride=self.stride, dropout=conv_dropout_rate,
                apply_layer_norm=layer_norm, residual=False
            )

        self.conv_output_dim = self.conv_block.output_dim
        self.output_layer = nn.Linear(self.conv_output_dim, self.output_dim)

        if self.apply_pos_embedding:
            self.pos_embedding = PositionalEncoding(self.output_dim, pos_dropout_rate)

    def forward(self, x, x_mask):
        """Subsample inputs.

        :param torch.Tensor x: input tensor
        :param torch.Tensor x_mask: input mask
        :return: subsampled x and mask (mask gains a singleton dim 1)
        :rtype Tuple[torch.Tensor, torch.Tensor]
        """
        x_lens = torch.sum(x_mask, dim=-1)

        if self.conv_type == '1d':
            x, x_lens = self.conv_block(x, x_lens)
        else:
            x, x_lens = self.conv_block(x.unsqueeze(1), x_lens)
            # Fold conv channels back into the feature axis: (b, c, t, f) -> (b, t, c*f).
            b, c, t, f = x.size()
            x = x.transpose(1, 2).contiguous().view(b, t, c * f)

        x = self.output_layer(x)
        mask = get_length_mask(x, x_lens)

        if self.apply_pos_embedding:
            x = self.pos_embedding(x)

        return x, mask.unsqueeze(1)


class ConvFrontEnd(nn.Module):
    """Two-layer Conv2d front end that subsamples input features.

    :param int input_size: input feature dimension
    :param int output_size: output (model) dimension
    :param int in_channel: channels of the first conv layer
    :param int mid_channel: channels between the two conv layers
    :param int out_channel: channels of the second conv layer
    :param list kernel_size: per-layer 2D kernel sizes
    :param list stride: per-layer strides
    :param list padding: per-layer paddings
    :param float conv_dropout_rate: dropout inside the conv layers
    :param str act_func_type: activation function name
    :param bool front_end_layer_norm: apply layer norm to the final output
    """

    def __init__(self,
                 input_size,
                 output_size,
                 in_channel=1,
                 mid_channel=32,
                 out_channel=128,
                 kernel_size=[[3,3],[3,3]],
                 stride=[2, 2],
                 padding=[0, 0],
                 conv_dropout_rate=0.0,
                 act_func_type='relu',
                 front_end_layer_norm=False):
        super(ConvFrontEnd, self).__init__()

        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.output_size = output_size
        self.act_func_type = act_func_type
        self.front_end_layer_norm = front_end_layer_norm

        # Exactly one (kernel, stride, padding) entry per conv layer.
        assert isinstance(self.kernel_size, list) and len(self.kernel_size) == 2
        assert isinstance(self.stride, list) and len(self.stride) == 2
        assert isinstance(self.padding, list) and len(self.padding) == 2

        self.conv1 = Conv2dLayer(
            input_size=input_size,
            in_channel=in_channel,
            out_channel=mid_channel,
            kernel_size=self.kernel_size[0],
            stride=self.stride[0],
            padding=self.padding[0],
            dropout=conv_dropout_rate,
            norm_type='none',
            residual=False,
            act_func_type=act_func_type)

        self.conv2 = Conv2dLayer(
            self.conv1.output_size,
            in_channel=mid_channel,
            out_channel=out_channel,
            kernel_size=self.kernel_size[1],
            stride=self.stride[1],
            padding=self.padding[1],
            dropout=conv_dropout_rate,
            norm_type='none',
            residual=False,
            act_func_type=act_func_type
        )

        self.conv_output_size = self.conv2.output_size * self.conv2.out_channel
        self.output_layer = nn.Linear(self.conv_output_size, self.output_size)

        if self.front_end_layer_norm:
            self.layer_norm = nn.LayerNorm(self.output_size)

    def forward(self, x, x_mask):
        """Subsample inputs.

        :param torch.Tensor x: input tensor
        :param torch.Tensor x_mask: input mask
        :return: subsampled features and mask (mask gains a singleton dim 1)
        :rtype Tuple[torch.Tensor, torch.Tensor]
        """
        lengths = torch.sum(x_mask, dim=-1)

        hidden, lengths = self.conv1(x.unsqueeze(1), lengths)
        hidden, lengths = self.conv2(hidden, lengths)

        # Fold conv channels back into the feature axis: (b, c, t, f) -> (b, t, c*f).
        batch, channels, frames, feats = hidden.size()
        hidden = hidden.transpose(1, 2).contiguous().view(batch, frames, channels * feats)

        hidden = self.output_layer(hidden)
        out_mask = get_length_mask(hidden, lengths)

        if self.front_end_layer_norm:
            hidden = self.layer_norm(hidden)

        return hidden, out_mask.unsqueeze(1)


class StackFeatureFrontEnd(nn.Module):
    """Stack neighbouring frames into one feature vector per output step.

    Each output frame concatenates ``left_frames + 1 + right_frames``
    consecutive input frames; the hop between windows is ``frame_rate / 10``
    input frames (i.e. a 30 ms frame rate over 10 ms inputs gives stride 3).
    """

    def __init__(self, input_size, left_frames, right_frames, frame_rate=30):
        super(StackFeatureFrontEnd, self).__init__()

        self.left_frames = left_frames
        self.right_frames = right_frames
        self.frame_rate = frame_rate

        self.stride = int(frame_rate / 10)
        self.input_size = input_size
        self.nframes = left_frames + right_frames + 1
        self.output_size = self.nframes * input_size

        # Unfold extracts sliding (nframes x input_size) patches along time.
        self.window = nn.Unfold(kernel_size=(self.nframes, self.input_size), stride=self.stride, padding=0)

    def forward(self, inputs, inputs_length):
        """Return stacked features and the per-utterance output lengths."""
        n_steps = inputs.size(1)
        remainder = (n_steps - self.nframes) % self.stride
        pad_len = 0 if remainder == 0 else self.stride - remainder
        if pad_len:
            # Zero-pad the time axis so windows tile the sequence exactly.
            inputs = F.pad(inputs, pad=(0, 0, 0, pad_len), value=0.0)

        # Frame stacking is a fixed rearrangement of values; no gradients needed.
        with torch.no_grad():
            stacked = self.window(inputs.unsqueeze(1)).transpose(1, 2)

        return stacked, self.return_output_lens(inputs_length, pad_len)

    def return_output_lens(self, length, pad_len):
        """Number of output frames produced from each (padded) input length."""
        return torch.floor((length.float() + pad_len - self.nframes) / self.stride + 1).long()