import torch
import torch.nn as nn
from torch.nn import Parameter, init

class SharedConv2d(nn.Module):
    """Conv layer split into a shared branch plus two domain-specific branches.

    Three parallel ``nn.Conv2d`` modules see the same input:

    - ``conv_1``: in-domain-only filters (``nOut_1`` channels),
    - ``conv_shared``: filters used by both domains (``nOut_2`` channels),
    - ``conv_2``: out-of-domain-only filters (``nOut_3`` channels).

    Args:
        nIn: number of input channels.
        nOut_1: in-domain-only output channels.
        nOut_2: shared output channels.
        nOut_3: out-of-domain-only output channels.
        *args: forwarded positionally to ``nn.Conv2d``
            (kernel_size, stride, padding, ...).
    """

    def __init__(self, nIn, nOut_1, nOut_2, nOut_3, *args) -> None:
        super(SharedConv2d, self).__init__()
        self.conv_1 = nn.Conv2d(nIn, nOut_1, *args)
        self.conv_shared = nn.Conv2d(nIn, nOut_2, *args)
        self.conv_2 = nn.Conv2d(nIn, nOut_3, *args)
        print("in-domain only dim:", nOut_1)
        print("out-domain only dim:", nOut_3)
        print("shared dim:", nOut_2)

    def forward(self, x, mode="id"):
        """Apply the domain branch selected by ``mode`` plus the shared branch.

        Args:
            x: input tensor of shape (batch, nIn, H, W).
            mode: ``"id"`` (in-domain) or ``"od"`` (out-of-domain).

        Returns:
            Tensor with ``nOut_1 + nOut_2`` channels for ``"id"`` or
            ``nOut_2 + nOut_3`` channels for ``"od"``.

        Raises:
            NotImplementedError: if ``mode`` is neither ``"id"`` nor ``"od"``.
        """
        if mode == "id":
            # Channel order: domain-specific filters first, shared second.
            return torch.cat([self.conv_1(x), self.conv_shared(x)], dim=1)
        if mode == "od":
            # Channel order: shared filters first, domain-specific second.
            return torch.cat([self.conv_shared(x), self.conv_2(x)], dim=1)
        # BUG FIX: original `return NotImplementedError` handed the exception
        # class back to the caller instead of raising it.
        raise NotImplementedError("mode must be 'id' or 'od', got {!r}".format(mode))

class SharedContextGating(nn.Module):
    """Context gating with separate linear maps per domain.

    Computes ``x * sigmoid(W_mode @ x)`` channel-wise, where ``W_mode`` is
    ``linear_id`` for in-domain input and ``linear_od`` for out-of-domain
    input. The linear layer acts over the channel dimension, so the input is
    permuted to channels-last and back.

    Args:
        input_num: number of channels of the gated feature map.
    """

    def __init__(self, input_num):
        super(SharedContextGating, self).__init__()
        self.sigmoid = nn.Sigmoid()
        self.linear_id = nn.Linear(input_num, input_num)
        self.linear_od = nn.Linear(input_num, input_num)

    def forward(self, x, mode="id"):
        """Gate ``x`` with the domain-specific linear map.

        Args:
            x: input tensor of shape (batch, input_num, H, W).
            mode: ``"id"`` or ``"od"``.

        Returns:
            Tensor of the same shape as ``x``.

        Raises:
            NotImplementedError: if ``mode`` is neither ``"id"`` nor ``"od"``.
        """
        if mode == "id":
            lin = self.linear_id(x.permute(0, 2, 3, 1))
        elif mode == "od":
            lin = self.linear_od(x.permute(0, 2, 3, 1))
        else:
            # BUG FIX: original `return NotImplementedError` returned the
            # exception class (a truthy object) instead of raising.
            raise NotImplementedError("mode must be 'id' or 'od', got {!r}".format(mode))
        lin = lin.permute(0, 3, 1, 2)  # back to channels-first
        return x * self.sigmoid(lin)

class SwitchBN(nn.Module):
    """Domain-switched batch norm with a shared affine transform.

    Maintains separate running statistics for in-domain ("id") and
    out-of-domain ("od") inputs, but both BatchNorm layers point at the same
    weight/bias ``Parameter`` objects, so the affine transform is learned
    jointly across domains.

    Args:
        nOut: number of channels to normalize.
    """

    def __init__(self, nOut) -> None:
        super(SwitchBN, self).__init__()
        # NOTE(review): PyTorch's BatchNorm `momentum` weights the *new* batch
        # statistics (inverse of the Keras convention), so momentum=0.99 makes
        # the running stats track the latest batch almost entirely — confirm
        # this is intended and not a Keras-style value carried over.
        self.bn_id = nn.BatchNorm2d(nOut, eps=0.001, momentum=0.99, affine=True)
        self.bn_od = nn.BatchNorm2d(nOut, eps=0.001, momentum=0.99, affine=True)
        shared_weight = Parameter(torch.empty(nOut))
        shared_bias = Parameter(torch.empty(nOut))
        init.ones_(shared_weight)
        init.zeros_(shared_bias)
        # Override weight/bias in both bn layers so they alias one Parameter
        # pair; gradients from either domain accumulate into the same copy.
        self.bn_id.weight = shared_weight
        self.bn_od.weight = shared_weight
        self.bn_id.bias = shared_bias
        self.bn_od.bias = shared_bias

    def forward(self, x, mode=None):
        """Normalize ``x`` with the statistics of the selected domain.

        Args:
            x: input tensor of shape (batch, nOut, H, W).
            mode: ``"id"`` or ``"od"``; there is no default behavior.

        Raises:
            NotImplementedError: if ``mode`` is not ``"id"`` or ``"od"``
            (including the default ``None``).
        """
        if mode == "od":
            return self.bn_od(x)
        if mode == "id":
            return self.bn_id(x)
        # BUG FIX: original `return NotImplementedError` returned the
        # exception class instead of raising — and this branch is hit by the
        # default mode=None.
        raise NotImplementedError("mode must be 'id' or 'od', got {!r}".format(mode))

class SharedCNN(nn.Module):
    """Stack of SharedConv2d -> SwitchBN -> SharedContextGating -> AvgPool stages.

    Each stage splits its filters into in-domain, shared, and out-of-domain
    groups (see ``SharedConv2d``) and normalizes with domain-switched batch
    norm. The domain flag is threaded through ``forward`` to every mode-aware
    layer.

    Args:
        n_in_channel: channels of the input feature map.
        activation: must be ``"cg"`` (case-insensitive); context gating is the
            only supported activation and anything else raises.
        conv_dropout: dropout probability appended after each gating layer.
        kernel_size / padding / stride: per-stage ``nn.Conv2d`` settings.
        nb_filters: output channels per stage.
        pooling: per-stage ``nn.AvgPool2d`` kernel sizes.
        normalization: kept for API compatibility; ``SwitchBN`` is always used
            (a message is printed when ``"switch"`` is requested).
        **transformer_kwargs: ignored; accepted for caller compatibility.
    """

    def __init__(
        self,
        n_in_channel,
        activation="Relu",
        conv_dropout=0,
        kernel_size=[3, 3, 3],
        padding=[1, 1, 1],
        stride=[1, 1, 1],
        nb_filters=[64, 64, 64],
        pooling=[(1, 4), (1, 4), (1, 4)],
        normalization="batch",
        **transformer_kwargs
    ):
        super(SharedCNN, self).__init__()
        self.nb_filters = nb_filters
        cnn = nn.Sequential()
        # NOTE(review): hard-coded expected (frames, mels) shape — confirm
        # against the caller; nothing in this class reads it.
        self.feat_shape = [626, 128]
        if normalization == "switch":
            print("Using SwitchBN")

        def conv(i, dropout=None, activ="relu"):
            # One stage: shared conv -> switched BN -> context gating -> dropout.
            # (Original helper also took unused `normalization`/`pooling`
            # parameters; dropped — it is private to this constructor.)
            nIn = n_in_channel if i == 0 else nb_filters[i - 1]
            nOut = nb_filters[i]
            nOut_1 = nOut // 2      # in-domain-only filters
            nOut_2 = nOut - nOut_1  # shared filters
            nOut_3 = nOut_1         # out-of-domain-only filters
            cnn.add_module(
                "conv{0}".format(i),
                SharedConv2d(nIn, nOut_1, nOut_2, nOut_3, kernel_size[i], stride[i], padding[i]),
            )
            cnn.add_module("switchnorm{0}".format(i), SwitchBN(nOut))
            if activ.lower() == "cg":
                cnn.add_module("cg{0}".format(i), SharedContextGating(nOut))
            else:
                raise NotImplementedError("activation only support CG")
            if dropout is not None:
                cnn.add_module("dropout{0}".format(i), nn.Dropout(dropout))

        # 128x862x64
        # (Original prepended a dummy [1, 1] entry and indexed pooling[i + 1],
        # which is equivalent to indexing pooling[i] directly.)
        for i in range(len(nb_filters)):
            conv(i, dropout=conv_dropout, activ=activation)
            cnn.add_module(
                "pooling{0}".format(i), nn.AvgPool2d(pooling[i])
            )  # bs x tframe x mels

        self.cnn = cnn

    def forward(self, x, mode=None):
        """Run the stack; mode-aware layers receive the domain flag."""
        for layer in self.cnn:
            if isinstance(layer, (SwitchBN, SharedContextGating, SharedConv2d)):
                x = layer(x, mode)
            else:
                x = layer(x)
        return x
