import torch
import torch.nn as nn
import torch.nn.functional as F


# from torch.nn.modules.normalization import LayerNorm


class Swish(nn.Module):
    """Swish activation: f(x) = x * sigmoid(x) (a.k.a. SiLU)."""

    def __init__(self):
        super(Swish, self).__init__()

    def forward(self, inputs):
        # Gate each element by its own sigmoid.
        return torch.sigmoid(inputs) * inputs


class SELayer(nn.Module):
    """Squeeze-and-Excitation gate over the length axis of a (B, D, L) tensor.

    Global-average-pools over L, pushes the pooled vector through a
    bottleneck MLP (reduction factor 8), and rescales every channel of the
    input by the resulting sigmoid gate.

    Args:
        dim: number of channels D; must be divisible by 8.

    Raises:
        ValueError: if ``dim`` is not divisible by 8.
    """

    def __init__(self, dim):
        super(SELayer, self).__init__()
        # Raise instead of assert: asserts are stripped under `python -O`.
        if dim % 8 != 0:
            raise ValueError('Dimension should be divisible by 8.')
        self.dim = dim
        # Bottleneck MLP: dim -> dim // 8 -> dim.
        # nn.SiLU is exactly x * sigmoid(x), identical to the local Swish.
        self.layers = nn.Sequential(
            nn.Linear(dim, dim // 8),
            nn.SiLU(),
            nn.Linear(dim // 8, dim)
        )

    def forward(self, inputs):
        """
        inputs: (B, D, L)
        returns: (B, D, L) — inputs scaled per-channel by the SE gate.
        """
        residual = inputs
        # Squeeze: average over the length axis -> (B, D).
        pooled = inputs.mean(dim=2)
        # Excite: bottleneck MLP, sigmoid gate in (0, 1) -> (B, D, 1).
        gate = self.layers(pooled).sigmoid().unsqueeze(2)
        # Broadcasting over L replaces the original explicit repeat.
        return gate * residual


class SEInvolution1D(nn.Module):
    """1-D involution whose kernel is generated from the input and SE-gated.

    A per-position kernel is produced by a reduce -> span convolution pair,
    re-weighted by an SE layer, then applied to unfolded windows of the
    input.  NOTE(review): `d_model` is presumably divisible by `group`, and
    both kernel sizes presumably odd so the symmetric padding preserves the
    sequence length — TODO confirm against callers.
    """

    def __init__(self, d_model, ratio, group, general_kernel_size, kernel_size, dropout):
        super(SEInvolution1D, self).__init__()
        self.general_kernel_size = general_kernel_size
        self.kernel_size = kernel_size
        self.group = group
        # Bottleneck conv: shrinks channels by `ratio` before kernel generation.
        self.reduce = nn.Conv1d(d_model, d_model // ratio, general_kernel_size)
        self.act = nn.ReLU()
        # Expands reduced features into one weight per (group, kernel tap).
        self.span = nn.Conv1d(d_model // ratio, group * kernel_size, general_kernel_size)

        self.se_layer = SELayer(group * kernel_size)
        # Extracts sliding windows of width `kernel_size` along the time axis.
        self.unfold = nn.Unfold((1, kernel_size))
        self.dropout = nn.Dropout(dropout)

    def forward(self, input):
        """
        input: (B, T, D) — batch, time, feature.  (The original docstring
               said "B,D,L", but the unpacking below reads batch/time/dim.)
        returns: (B, T, D)
        """
        b, t, d = input.size()
        # Channel-first layout for the Conv1d / Unfold ops: (B, D, T).
        input = input.permute(0, 2, 1)

        # --- Kernel generation: derive a per-position kernel from the input.
        kernel_input = F.pad(input, (self.general_kernel_size // 2, self.general_kernel_size // 2), value=0)
        reduced_input = self.act(self.reduce(kernel_input))
        reduced_input = F.pad(reduced_input, (self.general_kernel_size // 2, self.general_kernel_size // 2), value=0)
        # (B, group * kernel_size, T), SE-gated across its channels.
        kernel = self.se_layer(self.span(reduced_input))

        # --- Unfold the input into kernel_size-wide windows at every step.
        unfold_input = F.pad(input, (self.kernel_size // 2, self.kernel_size // 2), value=0)
        unfold_input = self.unfold(unfold_input.unsqueeze(-2))

        # kernel:       (B, group, 1, kernel_size, T) — shared within a group.
        kernel = kernel.view(b, self.group, self.kernel_size, t).unsqueeze(2)
        # unfold_input: (B, group, D // group, kernel_size, T)
        unfold_input = unfold_input.view(b, self.group, -1, self.kernel_size, t)
        # Weighted sum over the kernel taps (dim=3), then dropout.
        out = self.dropout(torch.mul(kernel, unfold_input).sum(dim=3))

        out = out.view(b, d, t)
        out = out.permute(0, 2, 1)  # b,t,d
        return out


class Self_Conv_Encoder_layer(nn.Module):
    """Pre-norm encoder layer with SE-involution spatial gating.

    Pipeline: x -> LayerNorm -> Linear(size, d_ffn) -> GELU -> Dropout
                -> SEInvolution1D -> LayerNorm -> Linear(d_ffn, size) -> + x
    """

    def __init__(
            self,
            size=512,
            d_ffn=1024,
            ratio=32,
            group=256,
            general_kernel_size=1,
            kernel_size=5,
            dropout1=0.1,
            dropout2=0.1):
        super(Self_Conv_Encoder_layer, self).__init__()
        self.norm1 = nn.LayerNorm(size)
        self.linear1 = nn.Linear(size, d_ffn)
        self.dropout1 = nn.Dropout(dropout1)
        self.act = nn.GELU()
        self.spatial_gating = SEInvolution1D(d_ffn, ratio, group, general_kernel_size, kernel_size, dropout2)
        self.norm2 = nn.LayerNorm(d_ffn)
        self.linear2 = nn.Linear(d_ffn, size)

    def forward(self, x):
        """Apply the layer to x of shape (B, T, size); output has the same shape."""
        shortcut = x
        hidden = self.linear1(self.norm1(x))
        hidden = self.dropout1(self.act(hidden))
        gated = self.spatial_gating(hidden)
        projected = self.linear2(self.norm2(gated))
        return shortcut + projected


if __name__ == '__main__':
    # Smoke test: a random batch should pass through one encoder layer with
    # its shape preserved, (5, 25, 512).  (The previous commented-out code
    # referenced a nonexistent `Encoder_layer` class and ran nothing.)
    x = torch.randn(5, 25, 512)
    layer = Self_Conv_Encoder_layer()
    output = layer(x)
    print(output.size())
