import torch.nn as nn
from torch.nn.parameter import Parameter
import torch
from utils import init_params


class SALayer(nn.Module):
    """Shuffle Attention (SA) layer for 3-D inputs of shape (batch, channel, length).

    Channels are split into ``groups`` groups; within each group one half of
    the channels receives channel attention (gated by a per-channel mean over
    the length axis) and the other half receives spatial attention (gated by a
    GroupNorm of the features). The halves are concatenated and re-mixed with
    a ShuffleNet-style channel shuffle.

    Args:
        channel: number of input channels; must be divisible by ``2 * groups``.
        groups: number of channel groups processed independently.
    """

    def __init__(self, channel, groups=16):
        super(SALayer, self).__init__()
        self.groups = groups
        # Per-channel mean over the length dimension: (N, C, L) -> (N, C, 1).
        # FIX: was nn.AdaptiveAvgPool2d(1), which interprets a 3-D tensor as an
        # unbatched (C, H, W) image and pools over the channel axis as well,
        # collapsing the per-channel statistics that channel attention needs.
        self.avg_pool = nn.AdaptiveAvgPool1d(1)
        # Learnable affine (scale, shift) for the channel-attention gate.
        self.cweight = Parameter(torch.zeros(1, channel // (2 * groups), 1))
        self.cbias = Parameter(torch.ones(1, channel // (2 * groups), 1))
        # Learnable affine (scale, shift) for the spatial-attention gate.
        self.sweight = Parameter(torch.zeros(1, channel // (2 * groups), 1))
        self.sbias = Parameter(torch.ones(1, channel // (2 * groups), 1))

        self.sigmoid = nn.Sigmoid()
        # One GroupNorm group per channel of the half-split (per-channel norm).
        self.gn = nn.GroupNorm(channel // (2 * groups), channel // (2 * groups))

    @staticmethod
    def channel_shuffle(x, groups):
        """Interleave channels across ``groups`` groups, preserving shape (b, c, h)."""
        b, c, h = x.shape

        x = x.reshape(b, groups, -1, h)
        x = x.permute(0, 2, 1, 3)

        # flatten back to (b, c, h)
        x = x.reshape(b, -1, h)

        return x

    def forward(self, x):
        """Apply shuffle attention; input and output are both (b, channel, h)."""
        b, c, h = x.shape

        # Fold the group axis into the batch: (b * groups, channel // groups, h).
        x = x.reshape(b * self.groups, -1, h)
        # Split each group's channels in half: one half per attention branch.
        x_0, x_1 = x.chunk(2, dim=1)

        # channel attention: gate each channel by its mean activation
        xn = self.avg_pool(x_0)
        xn = self.cweight * xn + self.cbias
        xn = x_0 * self.sigmoid(xn)

        # spatial attention: gate each position by its normalized feature
        xs = self.gn(x_1)
        xs = self.sweight * xs + self.sbias
        xs = x_1 * self.sigmoid(xs)

        # concatenate along channel axis and restore the original layout
        out = torch.cat([xn, xs], dim=1)
        out = out.reshape(b, -1, h)

        # mix information between the two branch halves
        out = self.channel_shuffle(out, 2)
        return out


class SaNetSentencePositiveModel(nn.Module):
    """Sentence classifier: transformer encoder + shuffle attention + linear head.

    The SA layer is applied over the token axis of the encoder's last hidden
    state, then the gated token vectors are sum-pooled into a single sentence
    vector and classified.

    Args:
        base_model: pretrained transformer; its output must expose
            ``hidden_states`` (presumably created with
            ``output_hidden_states=True`` — confirm against the caller).
        pretrained_model_name: used only to infer the hidden size
            (1024 when the name contains 'large', otherwise 768).
        max_len: fixed sequence length. The SA layer treats token positions
            as channels, so inputs must be padded/truncated to exactly this
            length.
        n_classes: number of output classes.
    """

    def __init__(self, base_model: nn.Module, pretrained_model_name: str, max_len=128, n_classes=2):
        super(SaNetSentencePositiveModel, self).__init__()
        self.base = base_model
        hidden_dim = 1024 if 'large' in pretrained_model_name else 768
        self.max_len = max_len
        self.out = nn.Sequential(nn.Linear(hidden_dim, n_classes))
        self.sa = SALayer(max_len)
        # Alias kept so external code that expects `.attention` keeps working.
        self.attention = self.sa
        init_params([self.out])

    def forward(self, input_ids, attention_mask):
        """Return unnormalized class logits of shape (batch, n_classes)."""
        encoded = self.base(input_ids=input_ids, attention_mask=attention_mask)
        # Last encoder layer: presumably (batch, seq_len, hidden_dim).
        gated_tokens = self.sa(encoded.hidden_states[-1])

        # Sum-pool over the token axis to obtain one vector per sentence.
        sentence_vector = torch.sum(gated_tokens, dim=1)

        return self.out(sentence_vector)
