import math
import oneflow as torch
import logging
import oneflow.nn as nn
from oasr.module.norm import LayerNorm2D
from oasr.module.act import activations


def cal_width_dim_2d(input_dim, kernel_size, stride, padding=1):
    """Return the output width of a conv along one spatial dimension.

    Standard convolution arithmetic: floor((W + 2P - K) / S + 1).
    """
    span = input_dim + 2 * padding - kernel_size
    return math.floor(span / stride + 1)


class Conv2dLayer(nn.Module):
    """One 2D convolution layer with activation, optional normalization,
    dropout and an optional residual connection.

    Args:
        input_size (int): input width (feature/frequency dimension).
        in_channel (int): number of input channels.
        out_channel (int): number of output channels.
        kernel_size (int or tuple): kernel size, `(time, freq)` when a tuple.
        stride (int or tuple): stride, `(time, freq)` when a tuple.
        padding (int or tuple): padding, `(time, freq)` when a tuple.
        dropout (float): dropout probability.
        norm_type (str): one of 'layer_norm', 'batch_norm', 'none'.
        residual (bool): add the input back when the output shape matches.
        act_func_type (str): key into the project-level ``activations`` table.
    """

    def __init__(self, input_size, in_channel, out_channel, kernel_size, stride, padding,
                 dropout=0.1, norm_type='none', residual=False, act_func_type='relu'):
        super(Conv2dLayer, self).__init__()

        self.input_size = input_size
        self.in_channel = in_channel
        self.out_channel = out_channel

        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding

        self.norm_type = norm_type  # one of [batch_norm, layer_norm, none]
        self.residual = residual
        assert self.norm_type in ['layer_norm', 'batch_norm', 'none']

        self.act_func_type = act_func_type

        self.conv_layer = nn.Conv2d(
            in_channels=in_channel,
            out_channels=out_channel,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding)

        # Width of the feature axis after the conv; index [1] selects the
        # frequency component when kernel/stride/padding are tuples.
        self.width = cal_width_dim_2d(input_size,
            self.kernel_size if isinstance(self.kernel_size, int) else self.kernel_size[1],
            self.stride if isinstance(self.stride, int) else self.stride[1],
            padding=self.padding if isinstance(self.padding, int) else self.padding[1])

        if self.norm_type == 'layer_norm':
            # LayerNorm2D normalizes over channel * width (see oasr.module.norm).
            self.norm = LayerNorm2D(self.out_channel * self.width)
            logging.info('Apply LayerNorm2D.')
        elif self.norm_type == 'batch_norm':
            self.norm = nn.BatchNorm2d(out_channel)
            logging.info('Apply BatchNorm2D.')
        else:
            pass

        self.dropout = nn.Dropout(dropout)

    def forward(self, inputs, input_lens=None):
        """Forward computation.

        Args:
            inputs (FloatTensor): `[B, C_i, T, F]`
            input_lens (IntTensor or None): `[B]`
        Returns:
            outputs (FloatTensor): `[B, C_o, T', F']`
            output_lens (IntTensor or None): `[B]`, or None when
                ``input_lens`` is None

        """
        residual = inputs

        outputs = self.conv_layer(inputs)
        outputs = activations[self.act_func_type](outputs)

        if self.norm_type != 'none':
            outputs = self.norm(outputs)

        outputs = self.dropout(outputs)

        # Residual add only when the conv preserved the full tensor shape.
        if self.residual and outputs.size() == residual.size():
            outputs += residual

        output_lens = self.return_output_lenth(input_lens)

        return outputs, output_lens

    def return_output_lenth(self, input_lens):
        """Map input sequence lengths through the conv along the time axis.

        Args:
            input_lens (IntTensor or None): `[B]`
        Returns:
            IntTensor or None: shortened lengths, or None when unknown.
        """
        # BUGFIX: forward() defaults input_lens to None, which previously
        # crashed here with AttributeError; propagate None instead.
        if input_lens is None:
            return None
        # Index [0] selects the time component when the conv
        # hyper-parameters are tuples.
        padding = self.padding if isinstance(self.padding, int) else self.padding[0]
        stride = self.stride if isinstance(self.stride, int) else self.stride[0]
        kernel_size = self.kernel_size if isinstance(self.kernel_size, int) else self.kernel_size[0]
        output_lens = torch.floor((input_lens.float() + 2 * padding - kernel_size) / stride + 1)

        return output_lens.int()

    @property
    def output_size(self):
        # Feature-axis width of the conv output (per channel).
        return self.width



class TwoLayerConv1dBlock(nn.Module):
    """Two stacked 1d-CNN layers with activation, optional LayerNorm,
    dropout and an optional residual connection.

    Args:
        in_channel (int): input feature size.
        mid_channel (int): feature size between the two convolutions.
        out_channel (int): output feature size.
        kernel_size (int or sequence of two ints): per-layer kernel size.
        stride (int or sequence of two ints): per-layer stride.
        dropout (float): dropout probability.
        apply_layer_norm (bool): apply LayerNorm after each conv.
        residual (bool): add the input back when the output shape matches.
        act_func_type (str): key into the project-level ``activations`` table.
    """

    def __init__(self, in_channel, mid_channel, out_channel, kernel_size, stride, dropout=0.1,
                 apply_layer_norm=True, residual=False, act_func_type='relu'):

        super(TwoLayerConv1dBlock, self).__init__()

        self.apply_layer_norm = apply_layer_norm
        self.residual = residual
        self.act_func_type = act_func_type
        self.dropout = nn.Dropout(p=dropout)

        # 1st layer.
        # BUGFIX: accept tuples as well as lists for per-layer settings;
        # a tuple previously fell through and was passed whole to nn.Conv1d.
        self.kernel_size1 = kernel_size[0] if isinstance(kernel_size, (list, tuple)) else kernel_size
        self.stride1 = stride[0] if isinstance(stride, (list, tuple)) else stride

        self.conv1 = nn.Conv1d(
            in_channels=in_channel,
            out_channels=mid_channel,
            kernel_size=self.kernel_size1,
            stride=self.stride1,
            padding=1)

        if self.apply_layer_norm:
            self.layer_norm1 = nn.LayerNorm(mid_channel)

        # 2nd layer
        self.kernel_size2 = kernel_size[1] if isinstance(kernel_size, (list, tuple)) else kernel_size
        self.stride2 = stride[1] if isinstance(stride, (list, tuple)) else stride

        self.conv2 = nn.Conv1d(
            in_channels=mid_channel,
            out_channels=out_channel,
            kernel_size=self.kernel_size2,
            stride=self.stride2,
            padding=1)

        if self.apply_layer_norm:
            self.layer_norm2 = nn.LayerNorm(out_channel)

        self.output_dim = out_channel

    def forward(self, inputs, input_lens):
        """Forward computation.

        Args:
            inputs (FloatTensor): `[B, T, F]`
            input_lens (IntTensor): `[B]`
        Returns:
            outputs (FloatTensor): `[B, T', F']`
            output_lens (IntTensor): `[B]`

        """
        residual = inputs

        # Conv1d expects [B, F, T]; transpose in and back out.
        inputs = self.conv1(inputs.transpose(2, 1)).transpose(2, 1)
        inputs = activations[self.act_func_type](inputs)

        if self.apply_layer_norm:
            inputs = self.layer_norm1(inputs)

        # NOTE(review): this relu is applied on top of act_func_type above
        # and looks redundant; kept for behavioral compatibility.
        inputs = torch.relu(inputs)
        inputs = self.dropout(inputs)

        inputs = self.conv2(inputs.transpose(2, 1)).transpose(2, 1)
        inputs = activations[self.act_func_type](inputs)

        if self.apply_layer_norm:
            inputs = self.layer_norm2(inputs)

        if self.residual and inputs.size() == residual.size():
            inputs += residual  # NOTE: this is the same place as in ResNet

        inputs = self.dropout(inputs)

        outputs = inputs
        output_lens = self.return_output_lenth(input_lens)

        return outputs, output_lens

    def return_output_lenth(self, input_lens):
        """Map input sequence lengths through both convolutions.

        Args:
            input_lens (IntTensor): `[B]`
        Returns:
            IntTensor: `[B]` lengths after conv1 and conv2.
        """
        # Both conv layers are constructed with padding=1.
        padding1 = 1
        padding2 = 1
        output_lens = input_lens.float()
        output_lens = torch.floor((output_lens + 2 * padding1 - self.kernel_size1) / self.stride1 + 1)
        output_lens = torch.floor((output_lens + 2 * padding2 - self.kernel_size2) / self.stride2 + 1)

        return output_lens.int()


class TwoLayerConv2dBlock(nn.Module):
    """Two stacked 2d-CNN layers with activation, optional LayerNorm2D,
    dropout and an optional residual connection.

    Args:
        input_dim (int): input width (feature/frequency dimension).
        in_channel (int): number of input channels.
        out_channel (int): number of output channels (both conv layers).
        kernel_size (int, tuple, or sequence of two): per-layer kernel size;
            each entry may itself be `(time, freq)`.
        stride (int, tuple, or sequence of two): per-layer stride.
        dropout (float): dropout probability.
        apply_layer_norm (bool): apply LayerNorm2D after each conv.
        residual (bool): add the input back when the output shape matches.
        act_func_type (str): key into the project-level ``activations`` table.
    """

    def __init__(self, input_dim, in_channel, out_channel, kernel_size, stride,
                 dropout=0.1, apply_layer_norm=True, residual=False, act_func_type='relu'):
        super(TwoLayerConv2dBlock, self).__init__()

        self.apply_layer_norm = apply_layer_norm
        self.act_func_type = act_func_type

        self.residual = residual
        self.dropout = nn.Dropout(p=dropout)

        # BUGFIX: accept tuples as well as lists for per-layer settings;
        # a tuple previously fell through and was used whole for both layers.
        self.kernel_size1 = kernel_size[0] if isinstance(kernel_size, (list, tuple)) else kernel_size
        self.stride1 = stride[0] if isinstance(stride, (list, tuple)) else stride

        # 1st layer
        self.conv1 = nn.Conv2d(
            in_channels=in_channel,
            out_channels=out_channel,
            kernel_size=self.kernel_size1,
            stride=self.stride1,
            padding=(1, 1))

        # BUGFIX: extract the frequency component ([1]) of a tuple kernel,
        # as was already done for the stride (matches Conv2dLayer above).
        self.width1 = cal_width_dim_2d(
            input_dim,
            self.kernel_size1 if isinstance(self.kernel_size1, int) else self.kernel_size1[1],
            self.stride1 if isinstance(self.stride1, int) else self.stride1[1])
        if self.apply_layer_norm:
            self.layer_norm1 = LayerNorm2D(out_channel * self.width1)

        self.kernel_size2 = kernel_size[1] if isinstance(kernel_size, (list, tuple)) else kernel_size
        self.stride2 = stride[1] if isinstance(stride, (list, tuple)) else stride

        # 2nd layer
        self.conv2 = nn.Conv2d(
            in_channels=out_channel,
            out_channels=out_channel,
            kernel_size=self.kernel_size2,
            stride=self.stride2,
            padding=(1, 1))

        self.width2 = cal_width_dim_2d(
            self.width1,
            self.kernel_size2 if isinstance(self.kernel_size2, int) else self.kernel_size2[1],
            self.stride2 if isinstance(self.stride2, int) else self.stride2[1])
        if self.apply_layer_norm:
            self.layer_norm2 = LayerNorm2D(out_channel * self.width2)

        # Flattened feature size after both convs (channels * width).
        self.output_dim = out_channel * self.width2

    def forward(self, inputs, input_lens=None):
        """Forward computation.

        Args:
            inputs (FloatTensor): `[B, C_i, T, F]`
            input_lens (IntTensor): `[B]`
        Returns:
            outputs (FloatTensor): `[B, C_o, T', F']`
            output_lens (IntTensor): `[B]`

        """
        residual = inputs

        inputs = self.conv1(inputs)
        inputs = activations[self.act_func_type](inputs)

        if self.apply_layer_norm:
            inputs = self.layer_norm1(inputs)

        inputs = self.dropout(inputs)

        inputs = self.conv2(inputs)
        inputs = activations[self.act_func_type](inputs)

        if self.apply_layer_norm:
            inputs = self.layer_norm2(inputs)

        if self.residual and inputs.size() == residual.size():
            inputs += residual  # NOTE: this is the same place as in ResNet

        # NOTE(review): this relu is applied on top of act_func_type above
        # and looks redundant; kept for behavioral compatibility.
        inputs = torch.relu(inputs)
        inputs = self.dropout(inputs)

        outputs = inputs
        output_lens = self.return_output_lenth(input_lens)

        return outputs, output_lens

    def return_output_lenth(self, input_lens):
        """Map input sequence lengths through both convs (time axis).

        Args:
            input_lens (IntTensor): `[B]`
        Returns:
            IntTensor: `[B]` lengths after conv1 and conv2.
        """
        # Both conv layers are constructed with padding=(1, 1).
        padding1 = 1
        padding2 = 1
        output_lens = input_lens.float()
        # BUGFIX: extract the time component ([0]) of a tuple kernel, as was
        # already done for the stride; a raw tuple broke the arithmetic.
        stride1 = self.stride1 if isinstance(self.stride1, int) else self.stride1[0]
        stride2 = self.stride2 if isinstance(self.stride2, int) else self.stride2[0]
        kernel1 = self.kernel_size1 if isinstance(self.kernel_size1, int) else self.kernel_size1[0]
        kernel2 = self.kernel_size2 if isinstance(self.kernel_size2, int) else self.kernel_size2[0]
        output_lens = torch.floor((output_lens + 2 * padding1 - kernel1) / stride1 + 1)
        output_lens = torch.floor((output_lens + 2 * padding2 - kernel2) / stride2 + 1)

        return output_lens.int()


class SELayer(nn.Module):
    """Squeeze-and-Excitation channel attention.

    Globally average-pools each channel, feeds the pooled vector through a
    two-layer bottleneck MLP ending in a sigmoid gate, and rescales the
    input feature map channel-wise by the resulting gates.
    """

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        # Squeeze: global spatial average per channel -> [B, C].
        squeezed = self.avg_pool(x).view(batch, channels)
        # Excite: per-channel gate in (0, 1), broadcast to [B, C, 1, 1].
        gates = self.fc(squeezed).view(batch, channels, 1, 1)
        return x * gates.expand_as(x)



# class CausalConv1d(nn.Module):
    # """1D dilated causal convolution.

    # Args:
    #     in_channels (int): input channel size
    #     out_channels (int): output channel size
    #     kernel_size (int): kernel size
    #     dilation (int): delation rate
    #     param_init (str): parameter initialization method

    # """

    # def __init__(self, in_channels, out_channels, kernel_size, dilation=1,
    #              param_init=''):

    #     super().__init__()

    #     self.padding = (kernel_size - 1) * dilation
    #     self.conv1d = nn.Conv1d(in_channels, out_channels, kernel_size,
    #                             padding=self.padding, dilation=dilation)

    #     if param_init == 'xavier_uniform':
    #         self.reset_parameters()
    #     else:
    #         logger.info('Parameter initialization is skipped.')

    # def reset_parameters(self):
    #     for n, p in self.named_parameters():
    #         init_with_xavier_uniform(n, p)

    # def forward(self, xs):
    #     """Forward pass.

    #     Args:
    #         xs (FloatTensor): `[B, T, C_in]`
    #     Returns:
    #         xs (FloatTensor): `[B, T, C_out]`

    #     """
    #     xs = xs.transpose(2, 1)
    #     xs = self.conv1d(xs)
    #     if self.padding != 0:
    #         xs = xs[:, :, :-self.padding]
    #     xs = xs.transpose(2, 1).contiguous()
    #     return xs


# class Separable1DConv(nn.Module):
#     def __init__(self, in_channels, mid_channels, out_channels, kernel_size, stride=1, padding=0):
        
#         self.conv = nn.Sequential(
#             nn.Conv1d(in_channels, mid_channels, kernel_size, stride, padding),
#             nn.LayerNorm(mid_channels),
#             nn.ReLU6(),
#             nn.Conv1d(1, out_channels, kernel_size=1, stride=1, padding=0),
#             nn.LayerNorm(out_channels),
#             nn.ReLU()
#         )

#     def forward(self, inputs):

#         outputs = self.conv(inputs)
        
#         return outputs


# class Separable2DConv(nn.Module):
    # def __init__(self, in_channels, mid_channels, out_channels, kernel_size, stride=1, padding=0):
        
    #     self.conv = nn.Sequential(
    #         nn.Conv2d(in_channels, mid_channels, kernel_size, stride, padding),
    #         nn.BatchNorm2d(mid_channels),
    #         nn.ReLU6(),
    #         nn.Conv2d(1, out_channels, kernel_size=1, stride=1, padding=0),
    #         nn.BatchNorm2d(out_channels),
    #         nn.ReLU()
    #     )

    # def forward(self, inputs):

    #     outputs = self.conv(inputs)
        
    #     return outputs
