import torch
from timm.models.vision_transformer import vit_deit_base_distilled_patch16_224
from timm.models.resnet import resnet18
from torch import nn

__all__ = ['self_attention_fusion']

# from training.models import vit_base_patch16_224, vit_small_patch16_224


class SelfAttentionFusion(nn.Module):
    """Two-stream distilled-DeiT model that fuses RGB and MSR inputs.

    Each stream is a ``vit_deit_base_distilled_patch16_224`` backbone whose
    forward pass returns a pair of feature vectors (class token, distillation
    token), each of width ``num_features``.  Three learnable single-vector
    attention kernels (q/k/v) compute softmax-weighted combinations of the
    stream outputs, and a final linear layer produces class logits.
    """

    # Backbone output widths for reference:
    # ViT: num_features=2 if x = self.head(x) else num_features=768
    # resnet18: num_features=2 if x = self.fc(x) else num_features=512

    def __init__(self, pretrained, in_chans, num_classes=2, num_features=1000):
        """
        Args:
            pretrained: if True, load ImageNet weights for both DeiT streams.
            in_chans: unused by the current DeiT backbones; kept for
                backward compatibility with existing callers.
            num_classes: number of output classes of the final classifier.
            num_features: feature width emitted by each stream (the DeiT
                head is configured to output this many values per token).
        """
        super().__init__()

        self.num_features = num_features
        self.num_classes = num_classes

        self.rgb_stream = vit_deit_base_distilled_patch16_224(
            pretrained=pretrained, num_classes=self.num_features)
        self.msr_stream = vit_deit_base_distilled_patch16_224(
            pretrained=pretrained, num_classes=self.num_features)
        # One learnable query/key/value vector each; forward() scores the
        # stacked stream features against these via batched dot products.
        self.q_kernel = nn.Parameter(torch.ones(1, self.num_features))
        self.k_kernel = nn.Parameter(torch.ones(1, self.num_features))
        self.v_kernel = nn.Parameter(torch.ones(1, self.num_features))
        self.classifier = nn.Linear(self.num_features, self.num_classes)

    def _attend(self, stacked, kernel):
        """Softmax-weight the rows of ``stacked`` by similarity to ``kernel``.

        Args:
            stacked: (B, S, F) tensor of S candidate feature vectors per item.
            kernel: (1, F) learnable attention vector.
        Returns:
            (B, 1, F) weighted combination of the S candidates.
        """
        kern = kernel.repeat(stacked.size(0), 1, 1)               # (B, 1, F)
        scores = torch.bmm(stacked, kern.transpose(1, 2))         # (B, S, 1)
        weights = torch.softmax(scores, dim=1)                    # softmax over S
        return torch.bmm(weights.transpose(1, 2), stacked)        # (B, 1, F)

    def forward(self, x_rgb, x_msr):
        """Fuse the two modality streams and classify.

        Args:
            x_rgb: RGB input batch (passed straight to the RGB stream).
            x_msr: MSR input batch (passed straight to the MSR stream).
        Returns:
            Logits of shape (B, num_classes).
        """
        # Each stream returns a (class-token, distillation-token) feature pair.
        # NOTE(review): timm's distilled DeiT returns this pair only in
        # training mode — confirm eval-mode behavior before deployment.
        x_rgb, x_rgb_dist = self.rgb_stream(x_rgb)
        x_msr, x_msr_dist = self.msr_stream(x_msr)

        # Fuse class tokens with the q kernel and distillation tokens with k.
        x_q = self._attend(
            torch.cat((x_rgb.unsqueeze(1), x_msr.unsqueeze(1)), 1), self.q_kernel)
        x_k = self._attend(
            torch.cat((x_rgb_dist.unsqueeze(1), x_msr_dist.unsqueeze(1)), 1),
            self.k_kernel)

        # Final fusion of the two intermediate summaries with the v kernel.
        x = self._attend(torch.cat((x_q, x_k), 1), self.v_kernel)

        # BUG FIX: squeeze only the singleton fusion dim (dim 1).  The old
        # bare .squeeze() also dropped the batch dim when B == 1, yielding
        # (num_classes,) instead of (1, num_classes) from the classifier.
        x = x.squeeze(1)
        return self.classifier(x)


def self_attention_fusion(in_chans=3, pretrained=False):
    """Factory for :class:`SelfAttentionFusion` in the style of timm factories.

    Args:
        in_chans: forwarded to the model constructor.
        pretrained: if True, load pretrained weights for both streams.
    Returns:
        A SelfAttentionFusion instance with ``default_cfg`` attached.
    """
    net = SelfAttentionFusion(pretrained=pretrained, in_chans=in_chans)
    # Expose the RGB backbone's config (input size, normalization, ...) the
    # same way timm model factories do, so downstream tooling can read it.
    net.default_cfg = net.rgb_stream.default_cfg
    return net

if __name__ == "__main__":
    model = SelfAttentionFusion(in_chans=3, num_classes=2, pretrained=True)
    x1 = torch.randn(4, 3, 224, 224)
    x2 = torch.randn(4, 3, 224, 224)
    regression = model(x1, x2)