import oneflow as torch
import oneflow.nn as nn
import oneflow.nn.functional as F
from oasr.module.utils import _ACTIVATION


class ConformerConvolutionModule(nn.Module):
    """Bidirectional Conformer convolution module.

    Pipeline: pointwise conv (as Linear) -> GLU -> depthwise conv ->
    BatchNorm -> Swish -> pointwise conv -> dropout, zeroing padded
    frames before and after the depthwise stage so they never leak
    into valid positions.

    Args:
        channels: model dimension (input and output feature size).
        kernel_size: depthwise kernel width; must be odd so symmetric
            "same" padding preserves the time dimension.
        bias: whether the conv/linear layers use a bias term.
        dropout: dropout probability applied to the final output.
    """

    def __init__(self, channels, kernel_size, bias=True, dropout=0.0):
        super(ConformerConvolutionModule, self).__init__()

        # Odd kernel is required for symmetric "same" padding below.
        assert kernel_size % 2 == 1

        # First pointwise conv doubles the channels for the GLU gate.
        self.pointwise_conv1 = nn.Linear(channels, 2 * channels, bias=bias)

        self.depthwise_conv = nn.Conv1d(
            channels,
            channels,
            kernel_size,
            stride=1,
            padding=(kernel_size - 1) // 2,
            groups=channels,
            bias=bias
        )

        self.batch_norm = nn.BatchNorm1d(channels)

        self.pointwise_conv2 = nn.Linear(channels, channels, bias=bias)

        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask):
        """
        Args:
            x: [batch_size, time, channels]
            mask: [batch_size, time]; non-zero marks valid frames.
        Returns:
            [batch_size, time, channels] with padded frames zeroed.
        """
        # Expand the per-frame mask over the channel dimension.
        mask = mask.unsqueeze(2).repeat([1, 1, x.size(-1)])

        x = self.pointwise_conv1(x)
        x = F.glu(x)
        # Zero padded frames so they do not leak through the depthwise conv.
        x = x.masked_fill(mask == 0, 0.0)

        # Conv1d / BatchNorm1d expect [batch, channels, time].
        x = x.transpose(1, 2)
        x = self.depthwise_conv(x)
        x = self.batch_norm(x)
        x = x * torch.sigmoid(x)  # swish
        x = x.transpose(1, 2)

        x = self.pointwise_conv2(x)
        x = x.masked_fill(mask == 0, 0.0)

        # Fix: the dropout module was constructed but never applied
        # (sibling ParametricConformerConvolutionModule applies it here).
        return self.dropout(x)


class UniDirectionalConformerConvolutionModule(nn.Module):
    """Causal (left-context-only) Conformer convolution module.

    Identical to ConformerConvolutionModule except the depthwise conv
    uses no internal padding; the input is left-padded by
    ``kernel_size - 1`` frames instead, so position t only sees
    positions <= t.

    Args:
        channels: model dimension (input and output feature size).
        kernel_size: depthwise kernel width.
        bias: whether the conv/linear layers use a bias term.
        dropout: dropout probability applied to the final output.
    """

    def __init__(self, channels, kernel_size, bias=True, dropout=0.0):
        super(UniDirectionalConformerConvolutionModule, self).__init__()

        self.kernel_size = kernel_size

        # First pointwise conv doubles the channels for the GLU gate.
        self.pointwise_conv1 = nn.Linear(channels, 2 * channels, bias=bias)

        self.depthwise_conv = nn.Conv1d(
            channels,
            channels,
            kernel_size,
            stride=1,
            padding=0,  # causal padding is applied manually in forward
            groups=channels,
            bias=bias
        )

        self.batch_norm = nn.BatchNorm1d(channels)

        # Fix: honor the `bias` argument (was hard-coded to True,
        # inconsistent with pointwise_conv1 and the depthwise conv).
        self.pointwise_conv2 = nn.Linear(channels, channels, bias=bias)

        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask):
        """
        Args:
            x: [batch_size, time, channels]
            mask: [batch_size, time]; non-zero marks valid frames.
        Returns:
            [batch_size, time, channels] with padded frames zeroed.
        """
        # Expand the per-frame mask over the channel dimension.
        mask = mask.unsqueeze(2).repeat([1, 1, x.size(-1)])

        x = self.pointwise_conv1(x)
        x = F.glu(x)
        # Zero padded frames so they do not leak through the depthwise conv.
        x = x.masked_fill(mask == 0, 0.0)

        # Left-pad the time axis only (causal), then go to [B, C, T].
        x = F.pad(x, pad=(0, 0, self.kernel_size - 1, 0), value=0.0).transpose(1, 2)
        x = self.depthwise_conv(x)
        x = self.batch_norm(x)
        x = x * torch.sigmoid(x)  # swish
        x = x.transpose(1, 2)

        x = self.pointwise_conv2(x)
        x = x.masked_fill(mask == 0, 0.0)

        # Fix: the dropout module was constructed but never applied.
        return self.dropout(x)

    def inference(self):
        raise NotImplementedError


class StreamingConformerConvolutionModule(nn.Module):
    """Conformer convolution module with configurable look-ahead.

    The depthwise conv uses no internal padding; the time axis is
    padded with ``kernel_size - right_context - 1`` frames on the left
    and ``right_context`` frames on the right, so each output frame
    sees at most ``right_context`` future frames.

    Args:
        channels: model dimension (input and output feature size).
        kernel_size: depthwise kernel width; must be odd.
        bias: whether the conv/linear layers use a bias term.
        dropout: dropout probability applied to the final output.
    """

    def __init__(self, channels, kernel_size, bias=True, dropout=0.0):
        super(StreamingConformerConvolutionModule, self).__init__()

        self.kernel_size = kernel_size
        assert kernel_size % 2 == 1

        # First pointwise conv doubles the channels for the GLU gate.
        self.pointwise_conv1 = nn.Linear(channels, 2 * channels, bias=bias)

        self.depthwise_conv = nn.Conv1d(
            channels,
            channels,
            kernel_size,
            stride=1,
            padding=0,  # asymmetric padding is applied manually in forward
            groups=channels,
            bias=bias
        )

        self.batch_norm = nn.BatchNorm1d(channels)

        # Fix: honor the `bias` argument (was hard-coded to True,
        # inconsistent with pointwise_conv1 and the depthwise conv).
        self.pointwise_conv2 = nn.Linear(channels, channels, bias=bias)

        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask=None, right_context=0):
        """
        Args:
            x: [batch_size, time, channels]
            mask: [batch_size, time]; non-zero marks valid frames.
                May be None (no masking applied).
            right_context: number of future frames each output may see;
                must lie in [0, kernel_size - 1].
        Returns:
            [batch_size, time, channels]
        """
        assert 0 <= right_context <= self.kernel_size - 1

        if mask is not None:
            # Expand the per-frame mask over the channel dimension.
            mask = mask.unsqueeze(2).repeat([1, 1, x.size(-1)])

        x = self.pointwise_conv1(x)
        x = F.glu(x)
        if mask is not None:
            # Zero padded frames before the depthwise conv mixes time steps.
            x = x.masked_fill(mask == 0, 0.0)

        # Asymmetric "same" padding: history on the left, look-ahead on
        # the right; then go to [B, C, T] for Conv1d.
        x = F.pad(x, pad=(0, 0, self.kernel_size - right_context - 1, right_context), value=0.0).transpose(1, 2)
        x = self.depthwise_conv(x)
        x = self.batch_norm(x)
        x = x * torch.sigmoid(x)  # swish
        x = x.transpose(1, 2)

        x = self.pointwise_conv2(x)
        if mask is not None:
            x = x.masked_fill(mask == 0, 0.0)

        # Fix: the dropout module was constructed but never applied.
        return self.dropout(x)

    def inference(self):
        raise NotImplementedError


class ParametricConformerConvolutionModule(nn.Module):
    """Conformer-style convolution module with a parametric smoothing kernel.

    Instead of a full depthwise Conv1d layer it convolves with an explicit
    weight tensor: either a single learnable kernel shared across channels,
    a per-channel learnable kernel (``vectorized_params``), or a fixed
    uniform moving-average kernel (``learnable_params=False``). A residual
    connection is added around the convolution.

    Args:
        d_model: model dimension (input and output feature size).
        kernel_size: smoothing kernel width; must be odd.
        right_context: look-ahead frames; -1 means a centered window
            (kernel_size // 2 on each side).
        vectorized_params: learn one kernel per channel instead of one
            shared kernel.
        learnable_params: if False, use a fixed 1/kernel_size average.
        dropout: dropout probability applied to the final output.
        activation: key into the project-level _ACTIVATION table; 'glu'
            halves the doubled projection, anything else keeps d_model.
    """

    def __init__(self, d_model, kernel_size, right_context=-1, vectorized_params=False, learnable_params=True, dropout=0.0, activation='relu'):
        super(ParametricConformerConvolutionModule, self).__init__()

        self.d_model = d_model
        self.learnable_params = learnable_params
        self.vectorized_params = vectorized_params
        self.activation = activation

        self.kernel_size = kernel_size
        # right_context == -1 selects a symmetric (centered) window.
        self.right_context = kernel_size // 2 if right_context == -1 else right_context
        self.left_context = self.kernel_size - self.right_context - 1

        assert self.kernel_size > self.right_context
        assert kernel_size % 2 == 1

        # GLU halves the feature dim, so it needs a doubled projection.
        self.pointwise_conv1 = nn.Linear(d_model, d_model * 2 if self.activation == 'glu' else d_model, bias=True)

        # NOTE(review): self.norm is constructed but not used in forward —
        # confirm whether a normalization step was intended here.
        self.norm = nn.LayerNorm(d_model)

        self.pointwise_conv2 = nn.Linear(d_model, d_model, bias=True)

        if self.learnable_params:
            # Fix: torch.Tensor([d_model, 1, kernel_size]) created a 1-D
            # tensor *containing* those three values, not a tensor of that
            # shape; xavier_normal_ then failed on a 1-D tensor. Allocate
            # by shape instead.
            if self.vectorized_params:
                self.weights = nn.Parameter(torch.empty(d_model, 1, kernel_size))
            else:
                self.weights = nn.Parameter(torch.empty(1, 1, kernel_size))
            torch.nn.init.xavier_normal_(self.weights)
        else:
            # Fixed uniform moving-average kernel (non-trainable buffer-like).
            self.weights = torch.full([d_model, 1, kernel_size], 1.0 / kernel_size)

        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask=None):
        """
        Args:
            x: [batch_size, time, channels]
            mask: [batch_size, time]; non-zero marks valid frames.
                May be None (no masking applied).
        Returns:
            [batch_size, time, channels]
        """
        if mask is not None:
            # Expand the per-frame mask over the channel dimension.
            mask = mask.unsqueeze(2).repeat([1, 1, x.size(-1)])

        x = self.pointwise_conv1(x)
        x = _ACTIVATION[self.activation](x)

        if mask is not None:
            x = x.masked_fill(mask == 0, 0.0)

        residual = x
        # Pad time so the conv output length equals the input length, then
        # move to [batch, channels, time] for conv1d.
        # Fix: the original transposed here and immediately transposed back,
        # feeding conv1d a [batch, time, channels] tensor and breaking the
        # grouped convolution.
        x = F.pad(x, pad=(0, 0, self.left_context, self.right_context), value=0.0).transpose(1, 2)

        if self.learnable_params and not self.vectorized_params:
            # One shared kernel, broadcast across all channels.
            x = F.conv1d(x, self.weights.repeat([self.d_model, 1, 1]), groups=self.d_model)
        else:
            x = F.conv1d(x, self.weights, groups=self.d_model)

        x = x.transpose(1, 2)

        x = self.pointwise_conv2(x + residual)

        if mask is not None:
            x = x.masked_fill(mask == 0, 0.0)

        return self.dropout(x)

    def inference(self):
        raise NotImplementedError