import torch.nn as nn

class GLU(nn.Module):
    """Gated linear unit over the channel axis of a (B, C, H, W) tensor.

    Gates a channel-wise linear projection of the input with the sigmoid of
    the raw input: ``out = Linear(x) * sigmoid(x)``.
    """

    def __init__(self, input_num):
        super(GLU, self).__init__()
        self.sigmoid = nn.Sigmoid()
        self.linear = nn.Linear(input_num, input_num)

    def forward(self, x):
        # nn.Linear acts on the last dim, so move channels last, project,
        # then restore the (B, C, H, W) layout.
        projected = self.linear(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        gate = self.sigmoid(x)
        return projected * gate


class ContextGating(nn.Module):
    """Context gating over the channel axis of a (B, C, H, W) tensor.

    Scales the input by a sigmoid gate computed from a channel-wise linear
    projection of the input itself: ``out = x * sigmoid(Linear(x))``.
    """

    def __init__(self, input_num):
        super(ContextGating, self).__init__()
        self.sigmoid = nn.Sigmoid()
        self.linear = nn.Linear(input_num, input_num)

    def forward(self, x):
        # nn.Linear operates on the last dim: shuttle channels there and back
        # before squashing the projection into a (0, 1) gate.
        gate = self.sigmoid(
            self.linear(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        )
        return x * gate


class CNN(nn.Module):
    """Stack of Conv2d -> (BatchNorm2d) -> activation -> (Dropout) -> AvgPool2d
    blocks, applied to inputs of shape (batch, n_in_channel, frames, mels).

    Args:
        n_in_channel: number of channels of the input tensor.
        activation: "relu", "leakyrelu", "glu" or "cg" (case-insensitive).
        dropout: dropout probability inserted after each activation;
            ``None`` disables the dropout modules entirely.
        kernel_size, padding, stride, channels: per-layer Conv2d
            hyper-parameters; all must have ``len == len(channels)``.
        pooling: per-layer AvgPool2d kernel sizes, one per conv layer;
            e.g. ``(1, 4)`` keeps the time axis and shrinks frequency.
        normalization: "batch" inserts BatchNorm2d after each conv; any
            other value disables normalization.
    """

    def __init__(
        self,
        n_in_channel,
        activation="Relu",
        dropout=0,
        kernel_size=[3, 3, 3],
        padding=[1, 1, 1],
        stride=[1, 1, 1],
        channels=[64, 64, 64],
        pooling=[(1, 4), (1, 4), (1, 4)],
        normalization="batch"
    ):
        super(CNN, self).__init__()
        self.channels = channels
        cnn = nn.Sequential()

        def conv(i, normalization="batch", dropout=None, activ="relu"):
            # Layer i maps channels[i-1] (or the raw input for i == 0)
            # to channels[i].
            nIn = n_in_channel if i == 0 else channels[i - 1]
            nOut = channels[i]
            cnn.add_module(
                "conv{0}".format(i),
                nn.Conv2d(nIn, nOut, kernel_size[i], stride[i], padding[i]),
            )
            if normalization == "batch":
                # Keras-style eps/momentum, kept for checkpoint compatibility.
                cnn.add_module(
                    "batchnorm{0}".format(i),
                    nn.BatchNorm2d(nOut, eps=0.001, momentum=0.99, affine=True, track_running_stats=True),
                )
            if activ.lower() == "leakyrelu":
                cnn.add_module("relu{0}".format(i), nn.LeakyReLU(0.2))
            elif activ.lower() == "relu":
                cnn.add_module("relu{0}".format(i), nn.ReLU())
            elif activ.lower() == "glu":
                cnn.add_module("glu{0}".format(i), GLU(nOut))
            elif activ.lower() == "cg":
                cnn.add_module("cg{0}".format(i), ContextGating(nOut))
            if dropout is not None:
                cnn.add_module("dropout{0}".format(i), nn.Dropout(dropout))

        # 128x862x64
        for i in range(len(channels)):
            conv(i, normalization=normalization, dropout=dropout, activ=activation)
            cnn.add_module(
                "pooling{0}".format(i), nn.AvgPool2d(pooling[i])
            )  # bs x tframe x mels

        self.cnn = cnn

    def forward(self, x):
        """Apply the conv stack to x of shape (batch, n_in_channel, T, F)."""
        # self.cnn is an nn.Sequential, so iterating layers in order is
        # equivalent to self.cnn(x).
        for layer in self.cnn:
            x = layer(x)
        return x
