import torch
from torch import nn

__all__ = ['attention_fusion']

from timm.models.vision_transformer import vit_deit_base_patch16_224, vit_base_patch16_224, \
    vit_base_resnet50_224_in21k, vit_deit_tiny_patch16_224, vit_deit_small_patch16_224, \
    vit_deit_base_patch16_224, vit_base_patch16_224
from timm.models.resnet import resnet18


class AttentionFusion(nn.Module):
    """Two-stream classifier that fuses RGB and MSR inputs via learned attention.

    Each input view is encoded by its own resnet18 backbone into a feature
    vector; the two vectors are combined as a softmax-weighted sum whose
    weights come from dot products with a learnable query vector, and the
    fused feature is classified by a linear head.
    """

    # Feature sizes depending on whether the backbone keeps its head:
    # ViT:      num_features=2 with the head, 768 without.
    # resnet18: num_features=2 with the fc layer, 512 without.
    def __init__(self, pretrained, in_chans, num_classes=2, num_features=2):
        """
        Args:
            pretrained: if True, load ImageNet weights into both backbones.
            in_chans: number of input channels. NOTE(review): currently unused
                by the resnet18 backbones (which assume 3-channel input); kept
                for interface compatibility with other backbone choices.
            num_classes: size of the final classifier output.
            num_features: dimensionality of each stream's feature vector.
        """
        super().__init__()

        self.num_features = num_features
        self.num_classes = num_classes

        # One backbone per input view. Honor the caller's `pretrained` flag
        # (previously hard-coded to True, silently ignoring the argument).
        self.rgb_stream = resnet18(num_classes=self.num_features, pretrained=pretrained)
        self.msr_stream = resnet18(num_classes=self.num_features, pretrained=pretrained)

        # Learnable attention query used to score each stream's features.
        self.q_kernel = nn.Parameter(torch.ones(1, self.num_features))
        self.classifier = nn.Linear(self.num_features, self.num_classes)

    def forward(self, x_rgb, x_msr):
        """Fuse the two views and return logits of shape (B, num_classes)."""
        x_rgb = self.rgb_stream(x_rgb)  # (B, num_features)
        x_msr = self.msr_stream(x_msr)  # (B, num_features)
        # Stack both stream features along a new "stream" axis: (B, 2, F).
        x_concate = torch.cat((x_rgb.unsqueeze(1), x_msr.unsqueeze(1)), 1)
        q_kernel_repeat = self.q_kernel.repeat(x_rgb.size(0), 1, 1)  # (B, 1, F)
        # Dot product of each stream feature with the query: (B, 2, 1).
        attn_scores = torch.bmm(x_concate, q_kernel_repeat.transpose(1, 2))
        # Normalize scores across the two streams.
        attn_weights = torch.softmax(attn_scores, dim=1)
        # Attention-weighted sum of the stream features: (B, 1, F).
        x = torch.bmm(attn_weights.transpose(1, 2), x_concate)
        # squeeze(1), not squeeze(): a bare squeeze() would also drop the
        # batch dimension when B == 1, yielding (F,) instead of (1, F).
        x = x.squeeze(1)
        x = self.classifier(x)
        return x


def attention_fusion(in_chans=3, pretrained=False):
    """Factory for :class:`AttentionFusion`.

    Builds the two-stream fusion model and copies the RGB backbone's
    ``default_cfg`` onto it so it behaves like a timm model.
    """
    net = AttentionFusion(pretrained=pretrained, in_chans=in_chans)
    net.default_cfg = net.rgb_stream.default_cfg
    return net
