import torch 
import torch.nn as nn
import torch.nn.functional as F
from speechbrain.dataio.dataio import length_to_mask
from speechbrain.nnet.CNN import Conv2d as _Conv2d
from speechbrain.nnet.CNN import Conv1d as _Conv1d
from speechbrain.nnet.normalization import BatchNorm2d as _BatchNorm2d
from speechbrain.nnet.linear import Linear
from speechbrain.lobes.models.ECAPA_TDNN import ECAPA_TDNN
import pdb
class Conv1d(_Conv1d):
    """speechbrain Conv1d pinned to ``skip_transpose=True``.

    With skip_transpose forced on, the wrapped layer presumably operates on
    channels-first input (batch, channels, time) — confirm against the
    speechbrain docs for the installed version.
    """

    def __init__(self, *args, **kwargs):
        # Equivalent to passing skip_transpose=True ahead of *args:
        # positional args bind first either way.
        super().__init__(*args, skip_transpose=True, **kwargs)

class Conv2d(_Conv2d):
    """Thin alias over speechbrain's Conv2d; adds no behavior of its own."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

class BatchNorm2d(_BatchNorm2d):
    """Thin alias over speechbrain's BatchNorm2d; adds no behavior of its own."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

def circulant(inputs, dim=1):
    """Build a circulant expansion of ``inputs`` along dimension ``dim``.

    A new axis of size ``inputs.shape[dim]`` is appended as the last
    dimension; slice ``k`` of that axis holds the original data rotated by
    ``k`` positions.
    E.g. tensor=[0,1,2], dim=0 --> [[0,1,2],[2,0,1],[1,2,0]]
    """
    size = inputs.shape[dim]
    reversed_in = inputs.flip((dim,))
    # Tile the reversed tensor (length 2*size-1 along `dim`) so that every
    # rotation appears as one contiguous window.
    tiled = torch.cat(
        [reversed_in, torch.narrow(reversed_in, dim=dim, start=0, length=size - 1)],
        dim=dim,
    )
    # Take sliding windows of length `size`, then flip the window axis to
    # restore the original element order.
    return tiled.unfold(dim, size, 1).flip((-1,))

class SELayer(nn.Module):
    """Squeeze-and-Excitation gate: rescales each channel by a learned weight.

    The per-channel descriptor is obtained by global average pooling, pushed
    through a two-layer bottleneck MLP (reduction factor ``reduction``) ending
    in a sigmoid, and applied multiplicatively to the input.
    """

    def __init__(self, channel, reduction=8):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, channels = x.shape[:2]
        # Squeeze: global average over the spatial dims -> (B, C).
        squeezed = self.avg_pool(x).view(batch, channels)
        # Excite: per-channel gate in (0, 1), broadcast back over space.
        gate = self.fc(squeezed).view(batch, channels, 1, 1)
        return x * gate

class ResBlock(nn.Module):
    '''
    Residual 2D-CNN building block for the CNN front-end of CNN-ECAPA.

    Two convolutions (each followed by activation / batch-norm), a
    squeeze-and-excitation gate, and an additive shortcut.  When the channel
    counts differ, a 1x1 convolution projects the shortcut.

    -----
    Argument:

    in_channels: int
        The number of input channels.
    out_channels: int
        The number of output channels.
    se_channels: int
        The number of output channels after the squeezing op.
        NOTE(review): currently unused — SELayer uses its own fixed
        reduction; the parameter is kept for interface compatibility.
    kernel_size: int
        The kernel size of both convolutions.
    feats_dim: int
        The feature dimension of the input (currently unused).
    '''
    def __init__(self, in_channels, out_channels, se_channels, kernel_size, feats_dim=80):
        super(ResBlock, self).__init__()

        self.conv1 = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size
        )
        self.bn1 = BatchNorm2d(input_size=out_channels)
        self.conv2 = Conv2d(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=kernel_size
        )
        self.bn2 = BatchNorm2d(input_size=out_channels)
        self.relu = nn.ReLU(inplace=True)

        self.seblock = SELayer(out_channels)

        # 1x1 projection for the shortcut only when channel counts differ.
        self.shortcut = None
        if in_channels != out_channels:
            self.shortcut = Conv2d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=1
            )

    def forward(self, x):
        '''
        x: torch.Tensor
            4-D input tensor.  NOTE(review): the speechbrain-based Conv2d
            wrapper used here defaults to channels-last layout, so x is
            presumably (batch, time, freq, channels) rather than
            (B, C, D, T) — confirm against the caller.
        '''
        residual = x if self.shortcut is None else self.shortcut(x)

        # Unconventional order: activation before batch-norm (kept as-is).
        x = self.bn1(self.relu(self.conv1(x)))
        x = self.bn2(self.conv2(x))

        # SELayer pools over the trailing two dims with the gated axis at
        # position 1, so swap the last axis into position 1 and back.
        x = self.seblock(x.transpose(1, -1)).transpose(1, -1)

        return x + residual


class ECAPA_CNN(nn.Module):
    '''
    ECAPA embedding extractor followed by a 2D-CNN scoring head applied to
    circulant-expanded groups of embeddings.

    Loosely based on:
    "Integrating Frequency Translational Invariance in TDNNs and Frequency Positional Information in 2D ResNets to Enhance Speaker Verification"
    https://arxiv.org/abs/2104.02370

    -----
    Argument:

    input_size: int
        The feat dim of input data.
    lin_neurons: int
        Number of neurons in the ECAPA embedding layer.
    ecapa_channels: list of ints
        Output channels for layers in ECAPA-TDNN.
    ecapa_kernel_sizes: list of ints
        Kernel sizes for layers in ECAPA-TDNN.
    ecapa_dilations: list of ints
        List of dilations for kernels in each layer in ECAPA-TDNN.
    attention_channels: int
        Attention channels of the ECAPA pooling.
    se_channels: int
        SE bottleneck channels in ECAPA-TDNN.
    scale: int
        Res2Net scale in ECAPA-TDNN.
    channels: list of ints
        Output channels for the two Conv2d layers of the head.
    kernel_sizes: list of ints
        Kernel sizes for the two Conv2d layers of the head.
    num_segments: int
        Number of embeddings grouped per example before the circulant
        expansion (previously hard-coded to 6).
    '''
    def __init__(
        self,
        input_size,
        lin_neurons=192,
        ecapa_channels=[512, 512, 512, 512, 1536],
        ecapa_kernel_sizes=[5, 3, 3, 3, 1],
        ecapa_dilations=[1, 2, 3, 4, 1],
        attention_channels=128,
        se_channels=128,
        scale=8,
        channels=[128, 128],
        kernel_sizes=[3, 3],
        num_segments=6,
    ):
        super(ECAPA_CNN, self).__init__()

        # List defaults are read-only config here; they are forwarded
        # unmodified to speechbrain's ECAPA_TDNN.
        self.num_segments = num_segments

        self.ecapa = ECAPA_TDNN(
            input_size=input_size,
            lin_neurons=lin_neurons,
            channels=ecapa_channels,
            kernel_sizes=ecapa_kernel_sizes,
            dilations=ecapa_dilations,
            attention_channels=attention_channels,
            se_channels=se_channels,
            res2net_scale=scale
        )

        self.conv1 = Conv2d(
            # One input channel per grouped embedding segment.
            in_channels=num_segments,
            out_channels=channels[0],
            kernel_size=kernel_sizes[0],
            stride=(1, 1)
        )
        # NOTE(review): named *_relu but is a Sigmoid; kept as-is to preserve
        # both behavior and checkpoint compatibility — confirm intent.
        self.conv1_relu = nn.Sigmoid()
        self.conv1_bn = BatchNorm2d(input_size=channels[0])

        self.conv2 = Conv2d(
            in_channels=channels[0],
            out_channels=channels[1],
            kernel_size=kernel_sizes[1],
            stride=(1, 1)
        )
        self.conv2_relu = nn.LeakyReLU()
        self.conv2_bn = BatchNorm2d(input_size=channels[1])

        # 1x1 convolution collapsing the channel axis to a single score map.
        self.fc = Conv2d(
            in_channels=channels[-1],
            out_channels=1,
            kernel_size=1,
        )

    def forward(self, x):
        '''
        x: torch.Tensor
            tensor of shape (batch * num_segments, time, feats_dim)
        Returns a tensor of shape (batch, 1, -1) of flattened head outputs.
        '''
        # Per-utterance embeddings from the ECAPA backbone.
        embedding = self.ecapa(x)

        # Regroup consecutive segments: -> (B, S, E).
        embedding = embedding.reshape(
            embedding.shape[0] // self.num_segments, self.num_segments, -1
        )
        # Circulant expansion along the embedding axis: -> (B, S, E, E).
        x = circulant(embedding, dim=-1)

        x = x.transpose(1, -1)   # -> (B, E, E, S)
        x = self.conv1_bn(self.conv1_relu(self.conv1(x)))
        x = self.conv2_bn(self.conv2_relu(self.conv2(x)))

        x = self.fc(x)

        x = x.transpose(1, -1)
        x = x.contiguous().view(x.shape[0], 1, -1)

        return x

