import torch
import torch.nn as nn
import torch.nn.functional as F

from model.SharedSpecificBert import SharedSpecificBERT
from model.SharedSpecificViT import SharedSpecificViT
from model.CrossAttn import CrossModalAttention
from model.model import Classifier

__all__ = ['MultiModalAttentionModel']

class MultiModalAttentionModel(nn.Module):
    """Bidirectional cross-modal attention fusion over text and image features.

    Text and image are encoded by shared/specific BERT and ViT encoders; the
    shared streams attend over each other (text-queries-image and
    image-queries-text), receive residual connections, and the two [CLS]
    (first-token) vectors are concatenated and classified.
    """

    def __init__(self, args):
        super(MultiModalAttentionModel, self).__init__()

        self.args = args

        # Per-modality encoders returning (shared, specific) feature pairs.
        self.shared_specific_bert = SharedSpecificBERT(args)
        self.shared_specific_vit = SharedSpecificViT(args)
        self.cross_attn = CrossModalAttention(dim=args.img_out)

        # tv_attn: text queries attend over image keys/values;
        # vt_attn: image queries attend over text keys/values.
        # NOTE(review): both use embed_dim=args.text_out, so this assumes
        # args.text_out == args.img_out — confirm in the config.
        self.tv_attn = nn.MultiheadAttention(args.text_out, num_heads=4, dropout=0.1)
        self.vt_attn = nn.MultiheadAttention(args.text_out, num_heads=4, dropout=0.1)

        # Attribute name intentionally kept as-is (historical misspelling) so
        # existing checkpoints and external references keep working.
        self.mm_classfier = Classifier(args.mm_dropout, args.text_out + args.img_out, args.post_dim, args.output_dim)

    def forward(self, text=None, image=None, label=None, infer=False, as_encoder=False):
        """Run the fusion model.

        Args:
            text: input batch for the text encoder.
            image: input batch for the image encoder.
            label: class indices; required unless ``infer`` or ``as_encoder``.
            infer: if True, return logits only (no loss).
            as_encoder: if True, return the fused per-modality [CLS] vectors
                instead of classifying.

        Returns:
            ``(fused_text_cls, fused_image_cls)`` if ``as_encoder``;
            logits if ``infer``; otherwise ``(loss, logits)``.
        """
        shared_text_feat, specific_text_feat = self.shared_specific_bert(text)
        shared_image_feat, specific_image_feat = self.shared_specific_vit(image)

        # nn.MultiheadAttention (batch_first=False) expects (seq, batch, dim);
        # encoders are presumed to emit (batch, seq, dim) — TODO confirm.
        shared_image_feat = shared_image_feat.permute(1, 0, 2)
        shared_text_feat = shared_text_feat.permute(1, 0, 2)

        fused_text, _ = self.tv_attn(shared_text_feat, shared_image_feat, shared_image_feat)
        fused_image, _ = self.vt_attn(shared_image_feat, shared_text_feat, shared_text_feat)

        # Residual connections. Out-of-place add replaces the original
        # in-place `+=`, which mutated attention outputs still referenced by
        # the autograd graph and can trigger "variable needed for gradient
        # computation has been modified by an inplace operation".
        fused_text = fused_text + shared_text_feat
        fused_image = fused_image + shared_image_feat

        # Back to (batch, seq, dim).
        fused_text = fused_text.permute(1, 0, 2)
        fused_image = fused_image.permute(1, 0, 2)

        if as_encoder:
            # Expose the fused [CLS] vectors for downstream use.
            return fused_text[:, 0, :], fused_image[:, 0, :]

        fusion = torch.cat([fused_text[:, 0, :], fused_image[:, 0, :]], dim=-1)
        output = self.mm_classfier(fusion)

        if infer:
            return output

        # Equivalent to the original mean over per-sample CE losses
        # (reduction='none' followed by torch.mean), without rebuilding a
        # CrossEntropyLoss module on every call.
        loss = F.cross_entropy(output, label)

        return loss, output

    def infer(self, text=None, image=None):
        """Convenience wrapper: return logits only, without computing a loss."""
        return self.forward(text=text, image=image, infer=True)