import math
import oneflow as torch
import oneflow.nn as nn
import oneflow.nn.functional as F


_ACTIVATIONS = {
    'relu': F.relu,
    'tanh': F.tanh,
    'prelu': F.prelu
}


def cal_width_dim_2d(input_dim, kernel_size, stride, padding=1):
    """Output width of a convolution along one axis.

    Standard convolution arithmetic: floor((W + 2p - k) / s) + 1.

    Args:
        input_dim: input width W along this axis.
        kernel_size: kernel extent k along this axis.
        stride: stride s along this axis.
        padding: symmetric padding p (default 1).
    Returns:
        The resulting output width.
    """
    span = input_dim + 2 * padding - kernel_size
    return math.floor(span / stride + 1)


class Conv2dLayer(nn.Module):
    """One 2-D convolution stage: conv -> activation -> (batch norm) -> dropout.

    Consumes `[B, C, T, F]` feature maps. ``self.output_size`` tracks the
    frequency width after the convolution; the time mask is subsampled by
    the time-axis stride in ``return_output_mask``.
    """

    def __init__(self, input_size, in_channel, out_channel, kernel_size, stride,
                 dropout=0.1, batch_norm=False, residual=False, act_func_type='relu'):
        super(Conv2dLayer, self).__init__()

        self.input_size = input_size
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.kernel_size = kernel_size
        self.stride = stride
        self.batch_norm = batch_norm
        self.residual = residual
        self.act_func_type = act_func_type

        # Half-kernel padding on both axes; for a [time, freq] kernel pair,
        # half of the *frequency* kernel is used.
        if isinstance(kernel_size, int):
            self.padding = kernel_size // 2
        else:
            self.padding = kernel_size[1] // 2

        self.conv_layer = nn.Conv2d(
            in_channels=in_channel,
            out_channels=out_channel,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding)

        # Frequency-axis component of each hyperparameter (index 1 when a
        # [time, freq] pair was given) feeds the output-width formula.
        k_f = self.kernel_size if isinstance(self.kernel_size, int) else self.kernel_size[1]
        s_f = self.stride if isinstance(self.stride, int) else self.stride[1]
        p_f = self.padding if isinstance(self.padding, int) else self.padding[1]
        self.output_size = cal_width_dim_2d(input_size, k_f, s_f, padding=p_f)

        if self.batch_norm:
            self.norm = nn.BatchNorm2d(out_channel)

        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask):
        """Apply conv/activation/(norm)/dropout and subsample the time mask.

        Args:
            x (FloatTensor): `[B, C_i, T, F]`
            mask (IntTensor): time mask; the slicing in ``return_output_mask``
                indexes it as `[B, T]` (the original docstring said
                `[B, 1, T]` — confirm against callers).
        Returns:
            out (FloatTensor): `[B, C_o, T', F']`
            out_mask (IntTensor): input mask subsampled by the time stride
        """
        shortcut = x

        out = self.conv_layer(x)
        out = _ACTIVATIONS[self.act_func_type](out)

        if self.batch_norm:
            out = self.norm(out)

        out = self.dropout(out)

        # Residual connection only when shapes line up exactly.
        if self.residual and out.size() == shortcut.size():
            out += shortcut

        out_mask = self.return_output_mask(mask, out.size(2))

        return out, out_mask

    def return_output_mask(self, mask, t):
        """Subsample the time mask by the time-axis stride.

        ``t`` (the conv output's time length) is unused;
        NOTE(review): the subsampled length is assumed to equal ``t``.
        """
        step = self.stride if isinstance(self.stride, int) else self.stride[0]
        return mask[:, ::step]


class Conv2DFrontEnd(nn.Module):
    """Two-stage 2-D convolutional front-end.

    Adds a channel axis to `[B, T, F]` input, applies two ``Conv2dLayer``
    stages, flattens channels x frequency, projects to ``output_size`` with
    a linear layer, and zeroes padded frames using the mask.
    """

    def __init__(self, input_size, output_size, in_channel=1, mid_channel=32,
                 out_channel=128, kernel_size=[[3,3],[3,3]], stride=[2, 2],
                 dropout=0.0, act_func_type='relu', front_end_layer_norm=False):
        super(Conv2DFrontEnd, self).__init__()

        self.kernel_size = kernel_size
        self.stride = stride
        self.output_size = output_size
        self.act_func_type = act_func_type
        self.front_end_layer_norm = front_end_layer_norm

        # Exactly two conv stages are supported.
        assert isinstance(self.kernel_size, list) and len(self.kernel_size) == 2
        assert isinstance(self.stride, list) and len(self.stride) == 2

        self.conv1 = Conv2dLayer(
            input_size=input_size,
            in_channel=in_channel,
            out_channel=mid_channel,
            kernel_size=self.kernel_size[0],
            stride=self.stride[0],
            dropout=dropout,
            batch_norm=False,
            residual=False,
            act_func_type=act_func_type)

        self.conv2 = Conv2dLayer(
            self.conv1.output_size,
            in_channel=mid_channel,
            out_channel=out_channel,
            kernel_size=self.kernel_size[1],
            stride=self.stride[1],
            dropout=dropout,
            batch_norm=False,
            residual=False,
            act_func_type=act_func_type)

        # Flattened conv output: remaining frequency bins x channels.
        self.conv_output_size = self.conv2.output_size * self.conv2.out_channel
        self.output_layer = nn.Linear(self.conv_output_size, self.output_size)

        if self.front_end_layer_norm:
            self.layer_norm = nn.LayerNorm(self.output_size)

    def forward(self, x, mask):
        """`[B, T, F]` features -> `[B, T', output_size]` plus subsampled mask."""
        # Add a channel axis: [B, T, F] -> [B, 1, T, F].
        out, out_mask = self.conv1(x.unsqueeze(1), mask)
        out, out_mask = self.conv2(out, out_mask)

        batch, channels, frames, freqs = out.size()
        out = out.transpose(1, 2).reshape(batch, frames, channels * freqs)
        out = self.output_layer(out)
        # Zero the projected features on padded frames.
        out = torch.masked_fill(out, out_mask.unsqueeze(2) == 0, 0.0)

        if self.front_end_layer_norm:
            out = self.layer_norm(out)

        return out, out_mask

    def inference(self, x, mask, cache):
        """Streaming entry point; ``cache`` is passed through untouched."""
        out, out_mask = self.forward(x, mask)
        return out, out_mask, cache


class Deep2DConvFrontEnd(nn.Module):
    """Stack of ``nlayers`` Conv2dLayer stages followed by a linear projection.

    `[B, T, F]` input gains a channel axis, passes through the conv stack,
    is flattened over channels x frequency, and projected to ``output_size``.

    Args:
        input_size: input frequency dimension F.
        output_size: size of the final linear projection.
        in_channel: channels of the input (typically 1).
        mid_channels: per-layer output channels, length ``nlayers``
            (the last entry is unused — the final layer emits ``out_channel``).
        out_channel: channels emitted by the last conv layer.
        kernel_size: per-layer kernel settings, length ``nlayers``.
        stride: per-layer stride settings, length ``nlayers``.
        nlayers: number of conv layers.
        dropout: dropout probability inside every conv layer.
        residual: enable residual connections inside each conv layer.
        front_end_layer_norm: apply LayerNorm to the projected output.
        batch_norm: enable BatchNorm inside every conv layer
            (new; default False keeps previous behavior).
        act_func_type: activation used by every conv layer
            (new; default 'relu' keeps previous behavior).
    """

    def __init__(self, input_size, output_size, in_channel, mid_channels, out_channel, kernel_size, stride,
                 nlayers, dropout=0.0, residual=False, front_end_layer_norm=False,
                 batch_norm=False, act_func_type='relu'):
        super(Deep2DConvFrontEnd, self).__init__()

        self.kernel_size = kernel_size
        self.stride = stride
        self.output_size = output_size

        self.front_end_layer_norm = front_end_layer_norm

        # Per-layer hyperparameter lists must cover every layer.
        assert isinstance(mid_channels, list) and len(mid_channels) == nlayers
        assert isinstance(self.kernel_size, list) and len(self.kernel_size) == nlayers
        assert isinstance(self.stride, list) and len(self.stride) == nlayers

        self.layers = nn.ModuleList()

        last_output_size = input_size
        for i in range(nlayers):
            conv_layer = Conv2dLayer(
                    input_size=last_output_size,
                    in_channel=in_channel if i == 0 else mid_channels[i-1],
                    out_channel=out_channel if i == (nlayers-1) else mid_channels[i],
                    kernel_size=self.kernel_size[i],
                    stride=self.stride[i],
                    dropout=dropout,
                    batch_norm=batch_norm,
                    residual=residual,
                    act_func_type=act_func_type
                )
            # Track the shrinking frequency width from layer to layer.
            last_output_size = conv_layer.output_size
            self.layers.append(conv_layer)

        self.conv_output_size = last_output_size * out_channel
        self.output_layer = nn.Linear(self.conv_output_size, self.output_size)

        if self.front_end_layer_norm:
            self.layer_norm = nn.LayerNorm(self.output_size)

    def forward(self, x, mask):
        """`[B, T, F]` -> `[B, T', output_size]` plus the subsampled mask."""
        x = x.unsqueeze(1)

        # Iterate directly; the index from the old enumerate() was never used.
        for conv in self.layers:
            x, mask = conv(x, mask)

        b, c, t, f = x.size()
        x = x.transpose(1, 2).reshape(b, t, c * f)
        x = self.output_layer(x)

        # Zero out features on padded frames.
        x = torch.masked_fill(x, mask.unsqueeze(2) == 0, 0.0)

        if self.front_end_layer_norm:
            x = self.layer_norm(x)

        return x, mask

    def inference(self, x, mask, cache):
        """Streaming wrapper; ``cache`` is passed through unchanged."""
        x, mask = self.forward(x, mask)
        return x, mask, cache
    

class Conv1dLayer(nn.Module):
    """One 1-D convolution stage: conv -> activation -> (batch norm) -> dropout.

    Works on `[B, C, L]` sequences; the time mask is subsampled by the
    stride and binarized in ``return_output_mask``.
    """

    def __init__(self, in_channel, out_channel, kernel_size, stride, dilation=1, groups=1,
                 dropout=0.1, batch_norm=False, residual=False, act_func_type='relu'):
        super(Conv1dLayer, self).__init__()

        # An odd kernel is required for symmetric "same"-style padding.
        assert kernel_size % 2 == 1

        self.in_channel = in_channel
        self.out_channel = out_channel
        self.kernel_size = kernel_size
        self.stride = stride
        self.dilation = dilation
        self.groups = groups
        self.batch_norm = batch_norm
        self.residual = residual
        self.act_func_type = act_func_type
        # Length-preserving padding at stride 1, scaled by the dilation.
        self.padding = (kernel_size // 2) * dilation

        self.conv_layer = nn.Conv1d(
            in_channels=in_channel,
            out_channels=out_channel,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.groups)

        # Feature dimension seen by downstream layers.
        self.output_size = out_channel

        if self.batch_norm:
            self.norm = nn.BatchNorm1d(out_channel)

        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask):
        """Apply conv/activation/(norm)/dropout and subsample the mask.

        Args:
            x (FloatTensor): `[B, C_i, L]`
            mask (IntTensor): `[B, T]`
        Returns:
            out (FloatTensor): `[B, C_o, L']`
            out_mask (BoolTensor): `[B, T']`
        """
        shortcut = x

        out = self.conv_layer(x)
        out = _ACTIVATIONS[self.act_func_type](out)

        if self.batch_norm:
            out = self.norm(out)

        out = self.dropout(out)

        # Residual connection only when shapes line up exactly.
        if self.residual and out.size() == shortcut.size():
            out += shortcut

        out_mask = self.return_output_mask(mask, out.size(2))
        return out, out_mask

    def return_output_mask(self, mask, t):
        """Subsample the mask by the stride and cast to boolean (``t`` unused;
        NOTE(review): the subsampled length is assumed to equal ``t``)."""
        return mask[:, ::self.stride] > 0


class Conv1DFrontEnd(nn.Module):
    """Two-stage 1-D convolutional front-end.

    Transposes `[B, T, F]` input to channels-first, applies two
    ``Conv1dLayer`` stages (the second emitting ``output_size`` channels),
    transposes back, and zeroes padded frames using the mask.
    """

    def __init__(self, input_size, output_size, mid_channel=32, kernel_size=[3, 3], stride=[2, 2], dilation=[1, 1], groups=[1, 1],
                 batch_norm=False, dropout=0.0, act_func_type='relu', front_end_layer_norm=False):
        super(Conv1DFrontEnd, self).__init__()

        self.kernel_size = kernel_size
        self.stride = stride
        self.output_size = output_size
        self.dilation = dilation
        self.groups = groups

        self.act_func_type = act_func_type
        assert act_func_type in ['relu', 'tanh', 'prelu']
        self.front_end_layer_norm = front_end_layer_norm

        # Exactly two conv stages are supported.
        assert isinstance(self.kernel_size, list) and len(self.kernel_size) == 2
        assert isinstance(self.stride, list) and len(self.stride) == 2
        assert isinstance(self.dilation, list) and len(self.dilation) == 2
        assert isinstance(self.groups, list) and len(self.groups) == 2

        self.conv1 = Conv1dLayer(
            in_channel=input_size,
            out_channel=mid_channel,
            kernel_size=self.kernel_size[0],
            stride=self.stride[0],
            dilation=self.dilation[0],
            groups=self.groups[0],
            dropout=dropout,
            batch_norm=batch_norm,
            residual=False,
            act_func_type=act_func_type)

        self.conv2 = Conv1dLayer(
            in_channel=mid_channel,
            out_channel=output_size,
            kernel_size=self.kernel_size[1],
            stride=self.stride[1],
            dilation=self.dilation[1],
            groups=self.groups[1],
            dropout=dropout,
            batch_norm=batch_norm,
            residual=False,
            act_func_type=act_func_type)

        if self.front_end_layer_norm:
            self.layer_norm = nn.LayerNorm(self.output_size)

    def forward(self, x, mask):
        """`[B, T, F]` features -> `[B, T', output_size]` plus boolean mask."""
        out = x.transpose(1, 2)
        out, out_mask = self.conv1(out, mask)
        out, out_mask = self.conv2(out, out_mask)
        out = out.transpose(1, 2)

        # Zero features on padded frames.
        out = torch.masked_fill(out, out_mask.unsqueeze(2) == 0, 0.0)

        if self.front_end_layer_norm:
            out = self.layer_norm(out)

        return out, out_mask

    def inference(self, x, mask, cache):
        """Streaming entry point; ``cache`` is passed through untouched."""
        out, out_mask = self.forward(x, mask)
        return out, out_mask, cache


class Deep1DConvFrontEnd(nn.Module):
    """Stack of ``nlayers`` Conv1dLayer stages over channels-first input.

    `[B, T, F]` input is transposed to `[B, F, T]`, run through the conv
    stack (the final layer emits ``output_size`` channels), transposed back,
    and padded frames are zeroed with the mask.

    Args:
        input_size: input feature dimension F (channels of the first conv).
        output_size: channels emitted by the last conv layer.
        channels: per-layer output channels, length ``nlayers``
            (the last entry is unused — the final layer emits ``output_size``).
        kernel_size: per-layer kernel sizes, length ``nlayers``.
        stride: per-layer strides, length ``nlayers``.
        nlayers: number of conv layers.
        dilation: per-layer list or a single int applied to all layers.
        groups: per-layer list or a single int applied to all layers.
        dropout: dropout probability inside every conv layer.
        residual: enable residual connections inside each conv layer.
        front_end_layer_norm: apply LayerNorm to the final output.
        batch_norm: enable BatchNorm inside every conv layer
            (new; default False keeps previous behavior).
        act_func_type: activation used by every conv layer
            (new; default 'relu' keeps previous behavior).
    """

    def __init__(self, input_size, output_size, channels, kernel_size, stride,
                 nlayers, dilation=1, groups=1, dropout=0.0, residual=False, front_end_layer_norm=False,
                 batch_norm=False, act_func_type='relu'):
        super(Deep1DConvFrontEnd, self).__init__()

        self.kernel_size = kernel_size
        self.stride = stride
        self.output_size = output_size
        self.dilation = dilation
        self.groups = groups

        self.front_end_layer_norm = front_end_layer_norm

        # Per-layer hyperparameter lists must cover every layer.
        assert isinstance(channels, list) and len(channels) == nlayers
        assert isinstance(self.kernel_size, list) and len(self.kernel_size) == nlayers
        assert isinstance(self.stride, list) and len(self.stride) == nlayers

        self.layers = nn.ModuleList()

        for i in range(nlayers):
            conv_layer = Conv1dLayer(
                    in_channel=input_size if i == 0 else channels[i-1],
                    out_channel=output_size if i == (nlayers-1) else channels[i],
                    kernel_size=self.kernel_size[i],
                    stride=self.stride[i],
                    dilation=self.dilation[i] if isinstance(self.dilation, list) else self.dilation,
                    groups=self.groups[i] if isinstance(self.groups, list) else self.groups,
                    dropout=dropout,
                    batch_norm=batch_norm,
                    residual=residual,
                    act_func_type=act_func_type
                )
            self.layers.append(conv_layer)

        if self.front_end_layer_norm:
            self.layer_norm = nn.LayerNorm(self.output_size)

    def forward(self, x, mask):
        """`[B, T, F]` -> `[B, T', output_size]` plus the subsampled mask."""
        x = x.transpose(1, 2)

        # Iterate directly; the index from the old enumerate() was never used.
        for conv in self.layers:
            x, mask = conv(x, mask)

        x = x.transpose(1, 2)

        # Zero out features on padded frames.
        x = torch.masked_fill(x, mask.unsqueeze(2) == 0, 0.0)

        if self.front_end_layer_norm:
            x = self.layer_norm(x)

        return x, mask

    def inference(self, x, mask, cache):
        """Streaming wrapper; ``cache`` is passed through unchanged."""
        x, mask = self.forward(x, mask)
        return x, mask, cache
