import torch
from torch import nn

__all__ = ['AttentionFusion', 'attention_fusion']


class AttentionFusion(nn.Module):
    """Two-stream (RGB + MSR) classifier fused by learned attention.

    Each input is encoded by its own backbone into a ``num_features``
    vector. A learned query vector scores the two stream features,
    softmax over the streams turns the scores into weights, and the
    attention-weighted sum of the two features is classified.

    Args:
        backbone: Constructor ``backbone(pretrained=..., num_classes=...)``
            returning an ``nn.Module`` that maps an input batch to a
            ``(batch, num_features)`` tensor (e.g. a timm/torchvision
            model factory — TODO confirm against callers).
        pretrained: Forwarded to ``backbone`` for both streams.
        num_classes: Size of the final classification output.
        num_features: Dimensionality of each stream's feature vector.
    """

    def __init__(self, backbone, pretrained, num_classes=2, num_features=1000):
        super(AttentionFusion, self).__init__()

        self.num_features = num_features
        self.num_classes = num_classes

        # Two independent encoders with identical architecture; their
        # classification heads are sized to emit feature vectors.
        self.rgb_stream = backbone(pretrained=pretrained, num_classes=self.num_features)
        self.msr_stream = backbone(pretrained=pretrained, num_classes=self.num_features)
        # Learned attention query, shared across the batch.
        self.q_kernel = nn.Parameter(torch.ones(1, self.num_features))
        self.classifier = nn.Linear(self.num_features, self.num_classes)

    def forward(self, x_rgb, x_msr):
        """Fuse the two streams with attention and classify.

        Args:
            x_rgb: RGB input batch accepted by the backbone.
            x_msr: MSR input batch accepted by the backbone.

        Returns:
            Logits of shape ``(batch, num_classes)``.
        """
        x_rgb = self.rgb_stream(x_rgb)  # (B, F)
        x_msr = self.msr_stream(x_msr)  # (B, F)
        # Stack the two stream features along a new "stream" dim: (B, 2, F).
        x_concate = torch.cat((x_rgb.unsqueeze(1), x_msr.unsqueeze(1)), 1)
        q_kernel_repeat = self.q_kernel.repeat(x_rgb.size(0), 1, 1)  # (B, 1, F)
        # Dot product of each stream feature with the query: (B, 2, 1).
        attn_scores = torch.bmm(x_concate, q_kernel_repeat.transpose(1, 2))
        # Normalize over the two streams.
        attn_weights = torch.softmax(attn_scores, dim=1)
        # Attention-weighted sum of the stream features: (B, 1, F).
        x = torch.bmm(attn_weights.transpose(1, 2), x_concate)
        # Fix: squeeze ONLY the singleton stream dim. A bare ``squeeze()``
        # also drops the batch dim when batch size is 1, yielding a 1-D
        # tensor and a wrongly-shaped classifier output.
        x = x.squeeze(1)  # (B, F)
        x = self.classifier(x)
        return x


def attention_fusion(backbone, pretrained=False):
    """Factory for :class:`AttentionFusion` with default arguments.

    Builds the fusion model and mirrors the RGB stream's ``default_cfg``
    onto the wrapper so downstream tooling can read it from the top level.

    Args:
        backbone: Backbone constructor passed through to ``AttentionFusion``.
        pretrained: Whether the backbone streams load pretrained weights.

    Returns:
        The constructed ``AttentionFusion`` module.
    """
    fusion_model = AttentionFusion(backbone=backbone, pretrained=pretrained)
    # Expose the backbone's config at the wrapper level (timm-style).
    fusion_model.default_cfg = fusion_model.rgb_stream.default_cfg
    return fusion_model
