import torch 
import torch.nn as nn
import torch.nn.functional as F
from speechbrain.dataio.dataio import length_to_mask
from speechbrain.nnet.CNN import Conv2d as _Conv2d
from speechbrain.nnet.CNN import Conv1d as _Conv1d
from speechbrain.nnet.normalization import BatchNorm2d as _BatchNorm2d
from speechbrain.nnet.linear import Linear
from speechbrain.lobes.models.ECAPA_TDNN import ECAPA_TDNN
import pdb
class Conv1d(_Conv1d):
    """SpeechBrain Conv1d pinned to channels-first (B, C, T) I/O.

    Forces ``skip_transpose=True`` so callers can feed tensors in the
    conventional PyTorch layout; every other argument passes through.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, skip_transpose=True, **kwargs)

class Conv2d(_Conv2d):
    """Local alias of SpeechBrain's Conv2d.

    Kept so this module imports all conv layers from one place; the
    pass-through constructor was redundant, so the parent's is inherited.
    """

class BatchNorm2d(_BatchNorm2d):
    """Local alias of SpeechBrain's BatchNorm2d.

    The pass-through constructor was redundant, so the parent's is
    inherited unchanged.
    """

class fw_SEBlock(nn.Module):
    '''
    An implementation of the frequency-wise squeeze-and-excitation block
    from "Integrating Frequency Translational Invariance in TDNNs ..."
    (https://arxiv.org/abs/2104.02370): one squeeze statistic is computed
    per frequency bin (averaged over time and channels) and used to
    rescale the input.
    -----
    Arguments:

    in_channels: int
        The number of input frequency bins (treated as conv channels).
    out_channels: int
        The number of output frequency bins.
    se_channels: int
        The number of channels after the squeeze (bottleneck) op.

    '''

    def __init__(self, in_channels, out_channels, se_channels):
        super(fw_SEBlock, self).__init__()

        # 1x1 convs form the excitation bottleneck over the frequency axis.
        self.conv1 = Conv1d(
            in_channels=in_channels, out_channels=se_channels, kernel_size=1
        )
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = Conv1d(
            in_channels=se_channels, out_channels=out_channels, kernel_size=1
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        '''
        x: torch.Tensor
            Tensor of shape (B, T, D, C) — SpeechBrain's
            (batch, time, freq, channels) layout.

        Returns a tensor of the same shape, rescaled per frequency bin.
        '''
        x = x.transpose(1, 2)  # -> (B, D, T, C)
        # Squeeze: mean over time and channels, one scalar per freq bin.
        # Equivalent to sum over both dims divided by C*T.
        z = x.mean(dim=(2, 3)).unsqueeze(2)  # -> (B, D, 1)

        # Excite: bottleneck MLP + sigmoid gate.
        s = self.relu(self.conv1(z))
        s = self.sigmoid(self.conv2(s))  # -> (B, D, 1)
        x = s.unsqueeze(3) * x  # broadcast the gate over (T, C)
        return x.transpose(1, 2)  # -> (B, T, D, C)

class ResBlock(nn.Module):
    '''
    An implementation of the 2D residual building block in CNN-ECAPA.

    Adds a learnable frequency positional encoding, applies two 2D
    convolutions, a frequency-wise SE block, and a residual connection.

    -----
    Argument:

    in_channels: int
        The number of input channels.
    out_channels: int
        The number of output channels.
    se_channels: int
        The number of output channels after the squeezing op.
    kernel_size: int
        The kernel size of both convolutions.
    feats_dim: int
        The frequency dimension (D) of the input feature maps.
    '''
    def __init__(self, in_channels, out_channels, se_channels, kernel_size, feats_dim=80):
        super(ResBlock, self).__init__()

        # One learnable scalar per frequency bin. nn.Parameter already sets
        # requires_grad=True, so passing it to randn was redundant.
        self.position_encoder = nn.Parameter(torch.randn(feats_dim))
        self.conv1 = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size
        )
        self.bn1 = BatchNorm2d(input_size=out_channels)
        self.conv2 = Conv2d(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=kernel_size
        )
        self.bn2 = BatchNorm2d(input_size=out_channels)
        self.relu = nn.ReLU(inplace=True)

        self.seblock = fw_SEBlock(feats_dim, feats_dim, se_channels)
        # 1x1 projection so the residual matches when channel counts differ.
        self.shortcut = None
        if in_channels != out_channels:
            self.shortcut = Conv2d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=1
            )

    def forward(self, x):
        '''
        x: torch.Tensor
            tensor of shape (B, T, D, C) — SpeechBrain's
            (batch, time, freq, channels) layout.

        Returns a tensor of shape (B, T, D, C).
        '''
        residual = x
        if self.shortcut is not None:  # identity check, not `!= None`
            residual = self.shortcut(x)

        # Broadcast-add the (D,) positional encoding along the frequency
        # axis: move D last so it broadcasts, then restore the layout.
        x = x.transpose(2, 3).add(self.position_encoder).transpose(2, 3)

        # NOTE(review): ReLU-before-BatchNorm ordering is kept as written.
        x = self.bn1(self.relu(self.conv1(x)))
        x = self.bn2(self.conv2(x))

        x = self.seblock(x)

        return x + residual


class CNN_ECAPA(nn.Module):
    '''
    An implementation of ECAPA CNN-TDNN from the paper
    "Integrating Frequency Translational Invariance in TDNNs and Frequency Positional Information in 2D ResNets to Enhance Speaker Verification"
    https://arxiv.org/abs/2104.02370

    A small 2D-CNN front-end (with frequency positional encodings and
    fw-SE blocks) feeds an ECAPA-TDNN back-end that produces the speaker
    embedding.

    -----
    Argument:

    input_size: int
        The feat dim of input data.
    lin_neurons: int
        Number of neurons in the final embedding layer of ECAPA-TDNN.
    channels: sequence of ints
        Output channels for Conv2d layers and ResBlocks.
    kernel_sizes: sequence of ints
        List of kernel sizes for each 2D layer.
    ecapa_channels: sequence of ints
        Output channels for layers in ECAPA-TDNN.
    ecapa_kernel_sizes: sequence of ints
        Kernel sizes for layers in ECAPA-TDNN.
    ecapa_dilations: sequence of ints
        List of dilations for kernels in each layer in ECAPA-TDNN.
    attention_channels: int
        Channels in the attentive pooling of ECAPA-TDNN.
    se_channels: int
        Bottleneck size of the SE blocks.
    scale: int
        Res2Net scale used inside ECAPA-TDNN.
    '''
    def __init__(
        self,
        input_size,
        lin_neurons=192,
        channels=(128, 128, 128, 128),      # tuples: avoid mutable defaults
        kernel_sizes=(3, 3, 3, 3),
        ecapa_channels=(512, 512, 512, 512, 1536),
        ecapa_kernel_sizes=(5, 3, 3, 3, 1),
        ecapa_dilations=(1, 2, 3, 4, 1),
        attention_channels=128,
        se_channels=128,
        scale=8
    ):
        super(CNN_ECAPA, self).__init__()
        # First conv halves the frequency dimension (stride 2 on freq).
        self.conv1 = Conv2d(
            in_channels=1,
            out_channels=channels[0],
            kernel_size=kernel_sizes[0],
            stride=(2, 1)
        )
        self.conv1_relu = nn.ReLU()
        self.conv1_bn = BatchNorm2d(input_size=channels[0])

        self.resblock1 = ResBlock(
            in_channels=channels[0],
            out_channels=channels[1],
            se_channels=se_channels,
            kernel_size=kernel_sizes[1],
            feats_dim=input_size // 2
        )

        self.resblock2 = ResBlock(
            in_channels=channels[1],
            out_channels=channels[2],
            se_channels=se_channels,
            kernel_size=kernel_sizes[2],
            feats_dim=input_size // 2
        )

        # Second strided conv halves the frequency dimension again.
        self.conv2 = Conv2d(
            in_channels=channels[2],
            out_channels=channels[3],
            kernel_size=kernel_sizes[3],
            stride=(2, 1)
        )
        self.conv2_relu = nn.ReLU()
        self.conv2_bn = BatchNorm2d(input_size=channels[3])

        # Back-end consumes the flattened (channels x freq) frame features.
        self.ecapa = ECAPA_TDNN(
            input_size=channels[3] * (input_size // 4),
            lin_neurons=lin_neurons,
            channels=ecapa_channels,
            kernel_sizes=ecapa_kernel_sizes,
            dilations=ecapa_dilations,
            attention_channels=attention_channels,
            se_channels=se_channels,
            res2net_scale=scale
        )

    def forward(self, x):
        '''
        x: torch.Tensor
            tensor of shape (batch, time, feats_dim)

        Returns the speaker embedding produced by the ECAPA-TDNN back-end.
        '''
        # SpeechBrain Conv2d expects (batch, time, freq, channels); adding
        # a trailing singleton channel axis is equivalent to the previous
        # transpose/unsqueeze/transpose chain.
        x = x.unsqueeze(-1)  # -> (B, T, D, 1)

        x = self.conv1_bn(self.conv1_relu(self.conv1(x)))  # -> (B, T, D/2, C0)
        x = self.resblock1(x)
        x = self.resblock2(x)
        x = self.conv2_bn(self.conv2_relu(self.conv2(x)))  # -> (B, T, D/4, C3)
        # Merge frequency and channel axes into one feature per frame.
        x = x.flatten(start_dim=2)  # -> (B, T, D/4 * C3)
        return self.ecapa(x)
