from torch import nn
import torch

from training.models.resnet import Bottleneck

__all__ = ['AttentionFusion', 'attention_fusion']


class BottomUpTopDownAttention(nn.Module):
    """Bottom-up top-down (residual) soft-attention module.

    The mask branch downsamples the feature map (bottom-up), refines it with
    two residual bottlenecks, upsamples it back to the input resolution
    (top-down), and squashes it into ``out_channels`` sigmoid maps.  The mask
    then reweights the input as ``(1 + mask) * x``, so attention can amplify
    features but never zero them out.

    Args:
        in_channels: channels of the incoming feature map (default 728 --
            presumably an Xception-style mid-flow width; TODO confirm).
        out_channels: number of attention maps produced (default 1; a single
            map broadcasts over all input channels).
    """

    def __init__(self, in_channels=728, out_channels=1):
        super(BottomUpTopDownAttention, self).__init__()
        # Bottom-up: halve the spatial resolution before refining the mask.
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.softmax1_blocks = nn.Sequential(
            # Bottleneck has expansion coefficient 4, so planes is
            # in_channels // 4 to keep the channel count unchanged.
            Bottleneck(inplanes=in_channels, planes=in_channels // 4),
            Bottleneck(inplanes=in_channels, planes=in_channels // 4),
        )
        # 1x1-conv head mapping the fused mask to out_channels sigmoid maps.
        self.softmax2_blocks = nn.Sequential(
            nn.BatchNorm2d(in_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(in_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x):
        mask = self.maxpool1(x)
        mask = self.softmax1_blocks(mask)
        # Top-down: upsample the mask back to the input's spatial size and
        # add the skip connection.  Interpolating to ``x.shape[2:]`` (instead
        # of the previous hard-coded 19x19) generalizes the module to any
        # input resolution; ``align_corners=True`` makes this bitwise
        # equivalent to the former nn.UpsamplingBilinear2d for 19x19 inputs.
        mask = nn.functional.interpolate(
            mask, size=x.shape[2:], mode='bilinear', align_corners=True) + x
        mask = self.softmax2_blocks(mask)
        # Residual attention: amplify the input by the learned mask.
        x = (1 + mask) * x
        return x


class AttentionFusion(nn.Module):
    """Two-stream (RGB + MSR) network fused by a learned attention query.

    Each stream is an independent backbone producing a ``hidden_size``
    embedding; a learnable query vector scores the two embeddings and the
    output is their softmax-weighted sum.

    Args:
        backbone: factory callable ``backbone(pretrained=..., num_classes=...)``
            returning an ``nn.Module`` (timm-style constructor).
        pretrained: forwarded to the backbone factory.
        hidden_size: embedding size produced by each stream (default 1000).
    """

    def __init__(self, backbone, pretrained, hidden_size=1000):
        super(AttentionFusion, self).__init__()
        self.rgb_stream = backbone(pretrained=pretrained, num_classes=hidden_size)
        # NOTE(review): rgb_attn is registered (so its weights exist in
        # checkpoints) but is never applied in forward() -- confirm whether
        # it should wrap an intermediate rgb_stream feature map.  Kept as-is
        # to preserve state_dict compatibility.
        self.rgb_attn = BottomUpTopDownAttention()
        self.msr_stream = backbone(pretrained=pretrained, num_classes=hidden_size)
        # Learnable attention query scored against both stream embeddings.
        self.q_kernel = nn.Parameter(torch.ones(1, hidden_size))

    def forward(self, x_rgb, x_msr):
        """Fuse the two stream embeddings; returns (batch, hidden_size)."""
        x_rgb = self.rgb_stream(x_rgb)
        x_msr = self.msr_stream(x_msr)
        # Stack the two embeddings: (batch, 2, hidden_size).
        x_concate = torch.cat((x_rgb.unsqueeze(1), x_msr.unsqueeze(1)), 1)
        # One copy of the query per sample: (batch, 1, hidden_size).
        q_kernel_repeat = self.q_kernel.repeat(x_rgb.size(0), 1, 1)
        # (batch, 2, 1) scores; softmax over the two streams.
        attn_scores = torch.bmm(x_concate, q_kernel_repeat.transpose(1, 2))
        attn_weights = torch.softmax(attn_scores, dim=1)
        # Weighted sum of the stream embeddings: (batch, 1, hidden_size).
        x = torch.bmm(attn_weights.transpose(1, 2), x_concate)
        # Bug fix: squeeze only the stream dim.  A bare squeeze() also
        # dropped the batch dim when batch size == 1, returning (hidden,)
        # instead of (1, hidden).
        x = x.squeeze(1)
        return x


def attention_fusion(backbone, pretrained=False):
    """Build an :class:`AttentionFusion` model over *backbone*.

    Copies the RGB stream's ``default_cfg`` (input size, normalization, ...)
    onto the fused model so downstream tooling can treat it like a plain
    backbone.
    """
    fused = AttentionFusion(backbone=backbone, pretrained=pretrained)
    fused.default_cfg = fused.rgb_stream.default_cfg
    return fused
