import torch
import torch.nn as nn
from utils import init_params


class ChannelAttention(nn.Module):
    """Channel attention gate (CBAM style).

    Squeezes the spatial dimensions with both average- and max-pooling,
    pushes each descriptor through a shared two-layer bottleneck MLP
    (implemented as 1x1 convolutions), sums the two excitations, and
    squashes the result to a per-channel gate in (0, 1).
    """

    def __init__(self, in_channels, reduction_ratio=16):
        super().__init__()
        hidden = in_channels // reduction_ratio  # bottleneck width
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        # Shared MLP: both pooled descriptors pass through the same weights.
        self.fc1 = nn.Conv2d(in_channels, hidden, 1, bias=False)
        self.relu = nn.ReLU()
        self.fc2 = nn.Conv2d(hidden, in_channels, 1, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return a (N, C, 1, 1) attention map for input x of shape (N, C, H, W)."""
        def excite(pooled):
            # shared bottleneck MLP
            return self.fc2(self.relu(self.fc1(pooled)))

        gate = excite(self.avg_pool(x)) + excite(self.max_pool(x))
        return self.sigmoid(gate)


class SentencePositiveModelWithCBAM(nn.Module):
    """Sentence classifier over attention-pooled transformer hidden states.

    Tokens of the encoder's last hidden layer are weighted by a small learned
    attention MLP (softmax over the sequence dimension), summed into a single
    context vector, and passed through a linear classification head.

    NOTE(review): despite the class name, no CBAM block is applied in this
    class — only the soft attention pooling below.

    Args:
        base_model: transformer encoder; its output must expose
            ``.hidden_states`` (for Hugging Face models this presumably
            requires ``output_hidden_states=True`` — TODO confirm).
        pretrained_model_name: used only to infer the hidden size
            (1024 for "large" variants, otherwise 768).
        n_classes: number of output classes of the linear head.
        **kwargs: accepted for backward compatibility; previously a required
            ``max_len`` was read here but never used, so it is now ignored.
    """

    def __init__(self, base_model: nn.Module, pretrained_model_name: str, n_classes=2, **kwargs):
        super().__init__()
        self.base = base_model
        # "large" transformer variants use a 1024-dim hidden state, others 768.
        dim = 1024 if 'large' in pretrained_model_name else 768
        self.out = nn.Sequential(
            nn.Linear(dim, n_classes)
        )
        # Token-level scoring MLP; softmax over dim=1 (the sequence axis)
        # turns the scores into pooling weights.
        self.attention = nn.Sequential(
            nn.Linear(dim, 512),
            nn.ReLU(),
            nn.Linear(512, 1),
            nn.Softmax(dim=1)
        )
        # Only the classification head is re-initialized; the attention MLP
        # keeps PyTorch's default init (presumably intentional — verify).
        init_params([self.out])

    def forward(self, input_ids, attention_mask):
        """Classify a batch of token sequences.

        Args:
            input_ids: token id tensor, shape (batch, seq_len).
            attention_mask: padding mask forwarded to the encoder.

        Returns:
            Logits of shape (batch, n_classes).
        """
        bert_output = self.base(input_ids=input_ids,
                                attention_mask=attention_mask)
        # Last layer of the encoder: (batch, seq_len, dim).
        last_layer_hidden_states = bert_output.hidden_states[-1]
        # (batch, seq_len, 1) pooling weights, softmax-normalized over tokens.
        # NOTE(review): attention_mask is NOT applied here, so padding tokens
        # also receive attention weight — confirm this is intended.
        weights = self.attention(last_layer_hidden_states)

        # Weighted sum over the sequence axis -> (batch, dim).
        context_vector = torch.sum(weights * last_layer_hidden_states, dim=1)

        return self.out(context_vector)
