import copy
import math
import torch
import numpy as np
import torch.nn as nn
from math import sqrt
import torch.nn.functional as F
# from resnet3d import resnet18
# from cascade_comp_inf_model import cascade_comp_inf_model
from PredictTCL_main.resnet3d import resnet18
from PredictTCL_main.cascade_comp_inf_model import cascade_comp_inf_model

class ContrastiveDisFusion(torch.nn.Module):

    def __init__(self, args=None, criterion=None, dim_list=None, num_classes=None):
        """Multi-modal disentangled-fusion classifier.

        Per modality it builds: a backbone encoder (VLTransformer for the
        ABIDE data, a deep-copied 3D ResNet trunk for imaging datasets),
        optional sigmoid attention gates, and three disentanglement heads
        producing shared ('inter'), supplementary ('inter_sup') and
        modality-specific ('dif') embeddings of size ``self.feature_dim``.
        Reconstruction decoders, fusion modules and the final classifier are
        then assembled according to the ``args`` switches.

        Args:
            args: configuration namespace, read extensively below
                (dataset_name, model_depth, attention_type, encoder_drop,
                cls_drop, fusion_type, cma_type, ifse_agg, ifrecon,
                ifaux_inter, sim_type, tau, ...).
            criterion: training loss callable, stored for external use.
            dim_list: per-modality input dims (ABIDE only); its length sets
                the modality count.
            num_classes: output dimension of every classifier head.
        """
        super(ContrastiveDisFusion, self).__init__()

        # Dimension of each disentangled embedding.
        self.feature_dim = 64
        self.args = args
        self.criterion = criterion
        self.num_classes = num_classes
        # Numerical floor used inside log/ratio terms of the contrastive losses.
        self.eps = torch.tensor(0.00000001, dtype=torch.float32, requires_grad=False)
        self.mse_loss = nn.MSELoss(reduction='mean')

        # ---- backbone encoders (one per modality) ----
        if self.args.dataset_name == 'ABIDE':
            self.modal_num = len(dim_list)
            self.backbone_encoder = nn.ModuleList([])
            for i in range(self.modal_num):
                # NOTE(review): VLTransformer is not visibly imported in this
                # file — presumably defined/imported elsewhere; verify.
                self.backbone_encoder.append(
                    VLTransformer(dim_list[i], args.vl_hidden, args.n_head, args.vl_dropout, args.vl_nlayer))
            self.dim_out = self.args.vl_hidden * self.args.n_head
        else:
            # Precedence is (dataset == 'men') or ('mrnet' in dataset): both
            # operands are booleans, so the condition is as intended.
            if self.args.dataset_name == 'men' or 'mrnet' in self.args.dataset_name:
                self.modal_num = 3
            elif self.args.dataset_name == 'brats':
                self.modal_num = 4

            # NOTE(review): only resnet18 is imported at the top of the file;
            # depths 34/50 would hit undefined resnet34/resnet50 — confirm.
            if self.args.model_depth == 18:
                resnet = resnet18(modal_num=1)
            elif self.args.model_depth == 34:
                resnet = resnet34(modal_num=1)
            elif self.args.model_depth == 50:
                resnet = resnet50(modal_num=1)

            backbone = list(resnet.children())

            # Feature width of the ResNet trunk before the classifier head.
            if self.args.model_depth == 18 or self.args.model_depth == 34:
                self.dim_out = 512
            elif self.args.model_depth == 50:
                self.dim_out = 2048
            else:
                print('wrong model_depth!')
            backbone_base = nn.Sequential(*backbone[:10])

            # backbone

            # Each modality gets its own deep copy of the trunk (no weight sharing).
            self.backbone_encoder = nn.ModuleList([])
            for i in range(self.modal_num):
                self.backbone_encoder.append(nn.Sequential(copy.deepcopy(backbone_base),
                                                           nn.ReLU(),
                                                           nn.Dropout(p=self.args.encoder_drop)))

        # disentanglement
        # att
        # ---- optional sigmoid attention gates feeding the three heads ----
        if self.args.attention_type == 'sigmoid':
            self.sigmoid_op = nn.Sigmoid()
            self.linear_inter_att = nn.ModuleList([])
            self.linear_inter_sup_att = nn.ModuleList([])
            self.linear_dif_att = nn.ModuleList([])
            for i in range(self.modal_num):
                self.linear_inter_att.append(nn.Sequential(nn.Linear(self.dim_out, self.dim_out),
                                                           nn.ReLU()))
                self.linear_inter_sup_att.append(nn.Sequential(nn.Linear(self.dim_out, self.dim_out),
                                                           nn.ReLU()))
                self.linear_dif_att.append(nn.Sequential(nn.Linear(self.dim_out, self.dim_out),
                                                         nn.ReLU()))

        # dis
        # ---- disentanglement heads: dim_out -> 256 -> feature_dim per modality ----
        self.encoder_hidden = [256, self.feature_dim]
        self.linear_dis_inter = nn.ModuleList([])
        self.linear_dis_inter_sup = nn.ModuleList([])
        self.linear_dis_dif = nn.ModuleList([])
        for i in range(self.modal_num):
            self.linear_dis_inter.append(
                nn.Sequential(nn.Linear(self.dim_out, self.encoder_hidden[0]),
                              nn.ReLU(),
                              nn.Dropout(p=self.args.encoder_drop),
                              nn.Linear(self.encoder_hidden[0], self.encoder_hidden[1])))

            self.linear_dis_inter_sup.append(
                nn.Sequential(nn.Linear(self.dim_out, self.encoder_hidden[0]),
                              nn.ReLU(),
                              nn.Dropout(p=self.args.encoder_drop),
                              nn.Linear(self.encoder_hidden[0], self.encoder_hidden[1])))

            self.linear_dis_dif.append(
                nn.Sequential(nn.Linear(self.dim_out, self.encoder_hidden[0]),
                              nn.ReLU(),
                              nn.Dropout(p=self.args.encoder_drop),
                              nn.Linear(self.encoder_hidden[0], self.encoder_hidden[1])))

        # ---- reconstruction decoders ----
        # modal_num**2 decoders cover every cross (dif_i, inter_j) pair used by
        # cross_recon_op; recon_op only indexes the first modal_num of them.
        if self.args.ifrecon == 'T':
            self.decoder_hidden = [256,self.dim_out]
            self.decoder_recon = nn.ModuleList([])
            for i in range(self.modal_num**2):
                self.decoder_recon.append(
                    nn.Sequential(nn.Linear(self.feature_dim*3, self.decoder_hidden[0]),
                                  nn.ReLU(),
                                  nn.Linear(self.decoder_hidden[0], self.decoder_hidden[1]),
                                  nn.Tanh()))

        # ---- fusion heads for the shared / supplementary embeddings ----
        if self.args.inter_fusion_type == 'concat':
            self.linear_inter_fusion = nn.Sequential(nn.ReLU(),
                                                     nn.Linear(self.feature_dim*self.modal_num, self.feature_dim))

        if self.args.inter_sup_fusion_type == 'concat':
            self.linear_inter_sup_fusion = nn.Sequential(nn.ReLU(),
                                                     nn.Linear(self.feature_dim*self.modal_num, self.feature_dim//2),
                                                     nn.ReLU(),
                                                     nn.Linear(self.feature_dim//2, self.feature_dim))

        if self.args.inter_inter_sup_fusion_type == 'concat':
            self.linear_inter_inter_sup_fusion = nn.Sequential(nn.ReLU(),
                                                     nn.Linear(self.feature_dim*2, self.feature_dim))

        if self.args.cma_type == 'concat_q':
            # self.linear_inter = nn.Sequential(nn.ReLU(),
            #                                   nn.Linear(self.feature_dim, self.feature_dim * self.modal_num))
            # self.layer_norm = nn.LayerNorm(self.feature_dim, eps=1e-6)
            pass

        # if self.ifinter_sa == 'T':
        #     self.linear_q_inter = nn.Linear(self.feature_dim, self.feature_dim)
        #     self.linear_k_inter = nn.Linear(self.feature_dim, self.feature_dim)
        #     self.linear_v_inter = nn.Linear(self.feature_dim, self.feature_dim)

        # ---- cross-modal attention / transformer fusion ----
        if self.args.fusion_type == 'cma':
            if self.args.cma_type == 'concat_q':
                self.cascade_comp_cma = cascade_comp_inf_model(self.feature_dim, self.args)
                self.linear_q = nn.Sequential(nn.ReLU(),
                                             nn.Linear(self.feature_dim*self.modal_num, self.feature_dim))
            elif self.args.cma_type == 'cascade_comp_inf':
                # One CMA stage per modality, all initialised identically.
                cma_model = cascade_comp_inf_model(self.feature_dim, self.args)
                self.cascade_comp_cma = nn.ModuleList([])
                for i in range(self.modal_num):
                    self.cascade_comp_cma.append(copy.deepcopy(cma_model))
                if self.args.ifse_agg == 'T':
                    # NOTE(review): SELayer is not visibly imported here — verify.
                    self.se_agg_model = SELayer(channel=self.modal_num+2, feature_dim=self.feature_dim)
            elif self.args.cma_type == 'comp_inf':
                # NOTE(review): comp_inf_model is not visibly imported here — verify.
                self.cma_model = comp_inf_model(self.feature_dim, self.modal_num, self.args)
        elif self.args.fusion_type == 'tsa':
            transformer_encoder_layer = nn.TransformerEncoderLayer(d_model=self.feature_dim, nhead=2)
            self.transformer_encoder = nn.TransformerEncoder(transformer_encoder_layer, num_layers=1)

        # if self.ifrecon == 'T':
        #     self.linear_recon = nn.ModuleList([])
        #     for i in range(self.modal_num):
        #         self.linear_recon.append(
        #             nn.Sequential(nn.Linear(self.feature_dim * 2, 256),
        #                           nn.ReLU(),
        #                           nn.Dropout(p=recon_drop),
        #                           nn.Linear(256, 512)))

        # Auxiliary classifier on the shared embedding alone.
        if self.args.ifaux_inter == 'T':
            self.linear_cls_inter = nn.Sequential(nn.Linear(self.feature_dim, 32),
                                                  nn.ReLU(),
                                                  nn.Dropout(p=self.args.cls_drop),
                                                  nn.Linear(32, self.num_classes))

        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        # self.cls_drop = nn.Dropout(p=cls_drop)

        # main cls
        # ---- main classifier; input width depends on the fusion variant ----
        if self.args.fusion_type == 'cma':
            if self.args.cma_type == 'cascade_comp_inf' or self.args.cma_type == 'comp_inf':
                if self.args.cma_type == 'cascade_comp_inf' and self.args.ifse_agg == 'T':
                    self.linear_cls = nn.Sequential(nn.Linear(self.feature_dim*(self.modal_num+2), self.feature_dim),
                                                    nn.ReLU(),
                                                    nn.Dropout(p=self.args.cls_drop),
                                                    nn.Linear(self.feature_dim, 32),
                                                    nn.ReLU(),
                                                    nn.Dropout(p=self.args.cls_drop),
                                                    nn.Linear(32, self.num_classes))
                else:
                    self.linear_cls = nn.Sequential(nn.Linear(self.feature_dim, 32),
                                                    nn.ReLU(),
                                                    nn.Dropout(p=self.args.cls_drop),
                                                    nn.Linear(32, self.num_classes))
            elif self.args.cma_type == 'concat_q':
                self.linear_cls = nn.Sequential(nn.Linear(self.feature_dim, 32),
                                                nn.ReLU(),
                                                nn.Dropout(p=self.args.cls_drop),
                                                nn.Linear(32, self.num_classes))
        elif self.args.fusion_type == 'concat':
            self.linear_cls = nn.Sequential(  # nn.BatchNorm1d(self.feature_dim),
                # nn.ReLU(),
                # nn.Dropout(p=self.args.cls_drop),
                nn.Linear(self.feature_dim*(self.modal_num+1), 32),
                # nn.BatchNorm1d(32),
                nn.ReLU(),
                nn.Dropout(p=self.args.cls_drop),
                nn.Linear(32, self.num_classes))
        elif self.args.fusion_type == 'tsa':
            self.linear_cls = nn.Sequential(nn.Linear(self.feature_dim*(self.modal_num*2+1), self.feature_dim),
                                            nn.ReLU(),
                                            nn.Dropout(p=self.args.cls_drop),
                                            nn.Linear(self.feature_dim, 32),
                                            nn.ReLU(),
                                            nn.Dropout(p=self.args.cls_drop),
                                            nn.Linear(32, self.num_classes))
        else:
            self.linear_cls = nn.Sequential(nn.Linear(self.feature_dim, 32),
                                            nn.ReLU(),
                                            nn.Dropout(p=self.args.cls_drop),
                                            nn.Linear(32, self.num_classes))

        # Mutual-information similarity backend for calc_sim.
        # NOTE(review): MI_pytorch is not visibly imported here — verify.
        if self.args.sim_type == 'MI':
            self.mutual_info = MI_pytorch(bins=20, min=0, max=1, sigma=100, reduction='batchmean')

    def weight_expand(self, weight, shape):
        """Broadcast a per-sample weight column to an arbitrary target shape.

        The weight is tiled ``shape[1]`` times along the second axis and then
        reshaped, so each sample's scalar weight covers every feature slot.
        """
        tiled = weight.repeat(1, shape[1])
        return torch.reshape(tiled, shape)

    def mixup_data(self, x, y, alpha=1.0, use_cuda=True):
        """Create mixup-augmented inputs and the corresponding target pair.

        Returns ``(mixed_x, y_a, y_b, lam)`` where ``lam`` is drawn from
        Beta(alpha, alpha) when ``alpha > 0`` and fixed at 1 (no mixing)
        otherwise. ``y_a`` is the original target, ``y_b`` the target of the
        sample each row was mixed with.
        """
        lam = np.random.beta(alpha, alpha) if alpha > 0 else 1

        n = x.size()[0]
        perm = torch.randperm(n)
        if use_cuda:
            perm = perm.cuda()

        mixed_x = lam * x + (1 - lam) * x[perm, :]
        return mixed_x, y, y[perm], lam

    def mixup_criterion(self, criterion, pred, y_a, y_b, lam):
        """Mixup loss: convex combination of the mean losses against both targets."""
        loss_a = torch.mean(criterion(pred, y_a))
        loss_b = torch.mean(criterion(pred, y_b))
        return lam * loss_a + (1 - lam) * loss_b

    def calc_sim(self, x1, x2):
        if self.args.sim_type == 'cosine':
            # return 1-torch.cosine_similarity(x1,x2,dim=-1)
            return torch.abs(torch.cosine_similarity(x1, x2, dim=-1))
        elif self.args.sim_type == 'l2norm':
            return self.mse_loss(x1,x2)
        elif self.args.sim_type == 'MI':
            # x1_np = x1.cpu().numpy()
            # x2_np = x2.cpu().numpy()
            # mi_score = 0
            # for i in range(x1_np.shape[0]):
            #     mi_score = mi_score + mutual_info_score(x1_np[i], x2_np[i])
            # return torch.as_tensor(mi_score / x1_np.shape[0]).cuda()
            return self.mutual_info(self.sigmoid(x1),self.sigmoid(x2))

    def calc_con_loss_last(self, anchor, pos, neg_list):

        cs_anchor_pos = torch.exp(self.calc_sim(anchor, pos) / self.args.tau)
        cs_anchor_negs = 0
        cs_neg_negs = 0
        for i in range(len(neg_list)):
            cs_anchor_negs += torch.exp(self.calc_sim(anchor, neg_list[i]) / self.args.tau)
            new_neg_list = list(range(i + 1, len(neg_list)))
            for j in new_neg_list:
                cs_neg_negs += torch.exp(self.calc_sim(neg_list[i], neg_list[j]) / self.args.tau)
        # print('cs_anchor_pos:', str(cs_anchor_pos))
        # print('cs_anchor_negs:', str(cs_anchor_negs))
        # print('cs_neg_negs:', str(cs_neg_negs))
        if self.args.sim_type == 'cosine' or self.args.sim_type == 'MI':
            loss = torch.mean(-torch.log(cs_anchor_pos / (cs_anchor_pos + cs_anchor_negs + cs_neg_negs)+self.eps))
        elif self.args.sim_type == 'l2norm':
            # print('cs_anchor_pos:',str(self.sigmoid(cs_anchor_pos)))
            # print('cs_anchor_negs:',str(self.sigmoid(cs_anchor_negs)))
            # print('cs_neg_negs:',str(self.sigmoid(cs_neg_negs)))
            loss = (cs_anchor_pos + self.eps) / (cs_anchor_negs + cs_neg_negs)
        else:
            print('wrong sim_type input!')
        return loss

    def contrastive_loss_last(self, pos_list, neg_list):
        con_loss = 0
        for i in range(len(pos_list)):
            anchor = pos_list[i]
            new_pos_list = list(range(len(pos_list)))
            new_pos_list.remove(i)
            for j in new_pos_list:
                pos = pos_list[j]
                con_loss += self.calc_con_loss(anchor, pos, neg_list)
        return con_loss / len(pos_list)

    def calc_con_loss(self, anchor, pos, neg_list):
        cs_anchor_pos = torch.exp(self.calc_sim(anchor, pos) / self.args.tau)
        cs_anchor_negs = 0
        for i in range(len(neg_list)):
            cs_anchor_negs += torch.exp(self.calc_sim(anchor, neg_list[i]) / self.args.tau)
        # print('cs_anchor_pos:', str(cs_anchor_pos))
        # print('cs_anchor_negs:', str(cs_anchor_negs))
        # print('cs_neg_negs:', str(cs_neg_negs))
        if self.args.sim_type == 'cosine' or self.args.sim_type == 'MI':
            loss = torch.mean(-torch.log(cs_anchor_pos / (cs_anchor_pos + cs_anchor_negs)+self.eps))
        elif self.args.sim_type == 'l2norm':
            # print('cs_anchor_pos:',str(self.sigmoid(cs_anchor_pos)))
            # print('cs_anchor_negs:',str(self.sigmoid(cs_anchor_negs)))
            # print('cs_neg_negs:',str(self.sigmoid(cs_neg_negs)))
            loss = (cs_anchor_pos + self.eps) / cs_anchor_negs
        else:
            print('wrong sim_type input!')
        return loss

    def contrastive_loss(self, pos_list, neg_list):
        con_loss = 0
        for i in range(len(pos_list)):
            anchor = pos_list[i]
            new_pos_list = list(range(len(pos_list)))
            new_pos_list.remove(i)
            for j in new_pos_list:
                pos = pos_list[j]
                con_loss += self.calc_con_loss(anchor, pos, neg_list)
        return con_loss / len(pos_list)

    def dot_product_angle(self, v1, v2):
        """Angle in degrees between two (squeezed) 1-D torch vectors.

        Fix: a zero-magnitude input previously printed a warning and then
        implicitly returned None; it now returns 0, matching the behaviour of
        the sibling ``dot_product_angle_np``.
        """
        v1 = torch.squeeze(v1)
        v2 = torch.squeeze(v2)
        n1 = torch.linalg.vector_norm(v1)
        n2 = torch.linalg.vector_norm(v2)
        if n1 == 0 or n2 == 0:
            print("Zero magnitude vector!")
            return 0
        cos_val = torch.dot(v1, v2) / (n1 * n2)
        # math.degrees coerces the scalar tensor to a Python float.
        return math.degrees(torch.arccos(cos_val))

    def expand_dim(self, x, type):
        """Expand a tensor to a higher rank and move it to the GPU.

        '1d-3d': (B,) -> (B, 2, feature_dim); '1d-2d': (B,) -> (B, 2);
        '2d-3d': (B, M) -> (B, M, feature_dim). Unknown types yield None.
        """
        if type == '1d-3d':
            lifted = x.unsqueeze(-1).unsqueeze(-1)
            return lifted.expand(-1, 2, self.feature_dim).cuda()
        if type == '1d-2d':
            return x.unsqueeze(-1).expand(-1, 2).cuda()
        if type == '2d-3d':
            return x.unsqueeze(-1).expand(-1, -1, self.feature_dim).cuda()

    def sa_operation(self,x,name):
        norm_fact = 1 / sqrt(x.shape[1])
        if name == 'inter':
            q = self.linear_q_inter(x)
            k = self.linear_k_inter(x)
            v = self.linear_v_inter(x)
        elif name == 'k':
            q = self.linear_q_k(x)
            k = self.linear_k_k(x)
            v = self.linear_v_k(x)
        elif name == 'v':
            q = self.linear_q_v(x)
            k = self.linear_k_v(x)
            v = self.linear_v_v(x)
        elif name == 'q0':
            q = self.linear_q_q[0](x)
            k = self.linear_k_q[0](x)
            v = self.linear_v_q[0](x)
        elif name == 'q1':
            q = self.linear_q_q[1](x)
            k = self.linear_k_q[1](x)
            v = self.linear_v_q[1](x)
        elif name == 'q2':
            q = self.linear_q_q[2](x)
            k = self.linear_k_q[2](x)
            v = self.linear_v_q[2](x)
        elif name == 'q3':
            q = self.linear_q_q[3](x)
            k = self.linear_k_q[3](x)
            v = self.linear_v_q[3](x)
        else:
            print('wrong input name in fun(sa_operation)!')

        if self.args.ifsa_addrelu=='T':
            q = self.relu(q)
            k = self.relu(k)
            v = self.relu(v)

        att = torch.bmm(torch.unsqueeze(q, dim=2),torch.unsqueeze(k, dim=2).transpose(1, 2)) * norm_fact  # [batch,input_dim,input_dim]
        sa = torch.bmm(torch.softmax(att, dim=-1), torch.unsqueeze(v, dim=2))
        return torch.squeeze(sa,dim=-1)

    def cma_operation(self, q, k, v, norm_fact):
        """One cross-modal attention step on per-sample feature vectors.

        q, k, v: (B, D). Returns the attended values with shape (B, D, 1).
        """
        # (B, D, 1) x (B, 1, D) -> (B, D, D) scaled score matrix.
        scores = torch.bmm(torch.unsqueeze(q, dim=2), torch.unsqueeze(k, dim=2).transpose(1, 2)) * norm_fact
        return torch.bmm(torch.softmax(scores, dim=-1), torch.unsqueeze(v, dim=2))

    def CMA(self, inter, q_list):

        if self.args.cma_type == 'cascade_comp_inf': # cascade complete information
            x_fusion_list = []
            x_fusion_list.append(inter)
            for i in range(len(q_list)):
                if self.args.cascade_order == 'order':
                    j = i
                elif self.args.cascade_order == 'reverse':
                    j = len(q_list)-1-i
                else:
                    print('Wrong cascade oreder input!')
                    break

                if i == 0:
                    if self.args.ifse_agg == 'T':
                        concat_fusion = torch.cat((torch.unsqueeze(inter,dim=1),torch.unsqueeze(q_list[j],dim=1)),dim=1)
                    x_fusion = self.cascade_comp_cma[j](inter, q_list[j], True)
                    x_fusion_list.append(x_fusion)
                else:
                    if self.args.ifse_agg == 'T':
                        concat_fusion = torch.cat((concat_fusion,torch.unsqueeze(q_list[j],dim=1)),dim=1)
                    x_fusion = self.cascade_comp_cma[j](x_fusion, q_list[j], False)
                    x_fusion_list.append(x_fusion)
            if self.args.ifse_agg == 'T':
                concat_fusion = torch.cat((concat_fusion, torch.unsqueeze(x_fusion, dim=1)),dim=1)
                x_se = self.se_agg_model(concat_fusion)
                out_att = x_se
            else:
                out_att = x_fusion
        elif self.args.cma_type == 'comp_inf':
            out_att = self.cma_model(inter, q_list)
        elif self.args.cma_type == 'concat_q':
            # get sup information of v
            for i in range(len(q_list)):
                if i == 0:
                    q_concat = q_list[i]
                else:
                    q_concat = torch.cat((q_concat,q_list[i]),dim=-1)
            out_att = self.cascade_comp_cma(inter, self.linear_q(q_concat), True)
        else:
            print('wrong cma_type input!')

        return {'out': out_att,
                'x_fusion_list':x_fusion_list}

    def dot_product_angle_np(self, v1, v2):
        """Angle in degrees between two numpy vectors; 0 for zero-length input."""
        v1 = np.squeeze(v1)
        v2 = np.squeeze(v2)
        n1 = np.linalg.norm(v1)
        n2 = np.linalg.norm(v2)
        if n1 == 0 or n2 == 0:
            print("Zero magnitude vector!")
            return 0
        cosine = np.dot(v1, v2) / (n1 * n2)
        return np.degrees(np.arccos(cosine))

    def calc_cross_recon_loss(self, x):
        """Mean (over the batch) Euclidean norm of the rows of ``x``."""
        row_norms = torch.sqrt(torch.sum(x * x, dim=1))
        return torch.mean(row_norms)

    def sup_loss_operarion(self, input1, input2):
        """Orthogonality ("suppression") loss between two feature batches.

        Both inputs are flattened per sample, zero-centred across the batch
        and L2-normalised per sample; the loss is the mean squared entry of
        their feature-by-feature correlation matrix, minimised when the two
        feature spaces are decorrelated.

        Fix: removed three leftover debug ``print`` calls (one duplicated)
        and normalised the ``keepdims`` alias to torch's ``keepdim``.
        """
        batch_size = input1.size(0)
        input1 = input1.view(batch_size, -1)
        input2 = input2.view(batch_size, -1)

        # Zero mean across the batch.
        input1 = input1 - torch.mean(input1, dim=0, keepdim=True)
        input2 = input2 - torch.mean(input2, dim=0, keepdim=True)

        # Per-sample L2 normalisation; norms are detached so gradients only
        # flow through the directions, not the magnitudes.
        input1_l2_norm = torch.norm(input1, p=2, dim=1, keepdim=True).detach()
        input1_l2 = input1.div(input1_l2_norm.expand_as(input1) + 1e-6)

        input2_l2_norm = torch.norm(input2, p=2, dim=1, keepdim=True).detach()
        input2_l2 = input2.div(input2_l2_norm.expand_as(input2) + 1e-6)

        sup_loss = torch.mean((input1_l2.t().mm(input2_l2)).pow(2))

        return sup_loss

    def calc_sup_loss(self, x_sup_list, x_inter_list, x_dif_list):
        for i in range(len(x_sup_list)):
            if i == 0:
                sup_loss = self.sup_loss_operarion(x_sup_list[i], x_inter_list[i])
                sup_loss += self.sup_loss_operarion(x_sup_list[i], x_dif_list[i])
            else:
                sup_loss += self.sup_loss_operarion(x_sup_list[i], x_inter_list[i])
                sup_loss += self.sup_loss_operarion(x_sup_list[i], x_dif_list[i])
        return sup_loss


    def calc_sup_dif_loss(self, x_sup_dif_list):
        for i in range(len(x_sup_dif_list)):
            if i < self.modal_num:
                for j in range(i,len(x_sup_dif_list)):
                    if i == 0 and j == 0:
                        sup_dif_loss = self.sup_loss_operarion(x_sup_dif_list[i], x_sup_dif_list[j])
                    else:
                        sup_dif_loss += self.sup_loss_operarion(x_sup_dif_list[i], x_sup_dif_list[j])
        return sup_dif_loss

    def cross_recon_op(self,x_ori_list,x_inter_list,x_dif_list):
        # cross_recon_loss = 0
        for i in range(len(x_dif_list)):
            for j in range(len(x_inter_list)):
                x_recon = self.decoder_recon[len(x_dif_list)*i+j](torch.cat((x_dif_list[i],x_inter_list[j]),dim=-1))
                # cross_recon_loss+=self.calc_cross_recon_loss(x_ori_list[i]-x_recon)
                if i == 0 and j == 0:
                    cross_recon_loss = torch.sum(self.calc_sim(x_ori_list[i],x_recon))/x_ori_list[0].shape[0]
                else:
                    cross_recon_loss += torch.sum(self.calc_sim(x_ori_list[i],x_recon))/x_ori_list[0].shape[0]
        return cross_recon_loss

    def recon_op(self,x_ori_list,x_inter_list, x_inter_sup_list, x_dif_list):
        assert len(x_ori_list) == len(x_inter_list) == len(x_inter_sup_list) == len(x_dif_list)
        for i in range(len(x_ori_list)):
            x_recon = self.decoder_recon[i](torch.cat((x_inter_list[i], x_inter_sup_list[i], x_dif_list[i]), dim=-1))
            if i == 0:
                recon_loss = torch.sum(self.calc_sim(x_ori_list[i], x_recon)) / x_ori_list[0].shape[0]
            else:
                recon_loss += torch.sum(self.calc_sim(x_ori_list[i], x_recon)) / x_ori_list[0].shape[0]
        return recon_loss

    def one_hot_embedding(self, label, num_classes=2):
        """One-hot encode integer labels as float rows of an identity matrix."""
        identity = torch.eye(num_classes)
        return identity[label]

    def forward(self, x, label=None, global_step=None, run_type='train'):

        x_ori_list = []
        if self.args.dataset_name == 'ABIDE':
            for i in range(self.modal_num):
                x_ori_list.append(self.backbone_encoder[i](x[i]))
                # print(self.backbone_encoder[i](x[i]).shape)
        else:
            for i in range(self.modal_num):
                data = x[:, i, ...]
                if data.ndim == 4:
                    data = torch.unsqueeze(data, 1)
                x_ori_list.append(self.backbone_encoder[i](data))

        x_inter_list = []
        x_inter_sup_list = []
        x_dif_list = []
        for i in range(self.modal_num):
            if self.args.attention_type == None:
                x_inter_list.append(self.linear_dis_inter[i](x_ori_list[i]))
                x_inter_sup_list.append(self.linear_dis_inter_sup[i](x_ori_list[i]))
                x_dif_list.append(self.linear_dis_dif[i](x_ori_list[i]))
            elif self.args.attention_type == 'sigmoid':
                x_inter_list.append(self.linear_dis_inter[i](torch.mul(self.sigmoid_op(self.linear_inter_att[i](x_ori_list[i])),self.linear_inter_att[i](x_ori_list[i]))))
                x_inter_sup_list.append(self.linear_dis_inter_sup[i](torch.mul(self.sigmoid_op(self.linear_inter_sup_att[i](x_ori_list[i])),self.linear_inter_sup_att[i](x_ori_list[i]))))
                x_dif_list.append(self.linear_dis_dif[i](torch.mul(self.sigmoid_op(self.linear_dif_att[i](x_ori_list[i])),self.linear_dif_att[i](x_ori_list[i]))))

        # 得到inter, inter_sup和 dif特征
        if self.args.inter_fusion_type == 'mean':
            for i in range(len(x_inter_list)):
                if i == 0:
                    x_inter_bf = x_inter_list[i]
                else:
                    x_inter_bf = x_inter_bf+x_inter_list[i]
            x_inter = x_inter_bf/self.modal_num

        elif self.args.inter_fusion_type == 'concat':
            for i in range(len(x_inter_list)):
                if i == 0:
                    x_inter_bf = x_inter_list[i]
                else:
                    x_inter_bf = torch.concat((x_inter_bf,x_inter_list[i]),dim=-1)
            x_inter = self.linear_inter_fusion(x_inter_bf)

        if self.args.inter_sup_fusion_type == 'concat':
            for i in range(len(x_inter_sup_list)):
                if i == 0:
                    x_inter_sup_bf = x_inter_sup_list[i]
                else:
                    x_inter_sup_bf = torch.cat((x_inter_sup_bf, x_inter_sup_list[i]), dim=-1)
            x_inter_sup = self.linear_inter_sup_fusion(x_inter_sup_bf)

        if self.args.inter_inter_sup_fusion_type == 'concat':
            x_inter_all = self.linear_inter_inter_sup_fusion(torch.cat((x_inter,x_inter_sup),dim=-1))
        x_fusion_list = None
        if self.args.fusion_type == 'concat':
            for i in range(len(x_dif_list)):
                if i == 0:
                    x_fusion = torch.cat((x_inter_all, x_dif_list[i]), dim=1)
                else:
                    x_fusion = torch.cat((x_fusion, x_dif_list[i]), dim=1)
        elif self.args.fusion_type == 'cma':
            cma_out = self.CMA(x_inter_all, x_dif_list)
            x_fusion = cma_out['out']
            x_fusion_list = cma_out['x_fusion_list']
            # if self.args.run_type == 'test':
            #     for i in range(len(x_fusion_list)):
            #         print('entropy of feature {}: {}'.format(str(i),str(x_fusion_list[i].entropy)))
        elif self.args.fusion_type == 'inter_all':
            x_fusion = x_inter_all
        elif self.args.fusion_type == 'tsa':
            for i in range(len(x_inter_sup_list)):
                if i == 0:
                    x_inter_sup_concat = torch.unsqueeze(x_inter_sup_list[i],dim=1)
                    x_dif_concat = torch.unsqueeze(x_dif_list[i],dim=1)
                else:
                    x_inter_sup_concat = torch.cat((x_inter_sup_concat, torch.unsqueeze(x_inter_sup_list[i],dim=1)), dim=1)
                    x_dif_concat = torch.cat((x_dif_concat, torch.unsqueeze(x_dif_list[i],dim=1)), dim=1)

            x_concat = torch.cat((torch.unsqueeze(x_inter,dim=1),x_inter_sup_concat,x_dif_concat),dim=1)
            tsa_out = self.transformer_encoder(x_concat)
            for i in range(tsa_out.shape[1]):
                if i == 0:
                    x_fusion = tsa_out[:,i,:]
                else:
                    x_fusion = torch.cat((x_fusion,tsa_out[:,i,:]),dim=-1)
        out_prediction = self.linear_cls(x_fusion)

        if run_type == 'test':

            # calc angle of vectors:np
            # # inter-inter
            # print('The angle of {} and {} is {}'.format('0_inter', '1_inter', str(self.dot_product_angle_np(x_0_inter.cpu().numpy(), x_1_inter.cpu().numpy()))))
            # print('The angle of {} and {} is {}'.format('0_inter', '2_inter', str(self.dot_product_angle_np(x_0_inter.cpu().numpy(), x_2_inter.cpu().numpy()))))
            # print('The angle of {} and {} is {}'.format('1_inter', '2_inter', str(self.dot_product_angle_np(x_1_inter.cpu().numpy(), x_2_inter.cpu().numpy()))))
            # # dif-dif
            # print('The angle of {} and {} is {}'.format('0_dif', '1_dif', str(self.dot_product_angle_np(x_0_dif.cpu().numpy(), x_1_dif.cpu().numpy()))))
            # print('The angle of {} and {} is {}'.format('0_dif', '2_dif',  str(self.dot_product_angle_np(x_0_dif.cpu().numpy(), x_2_dif.cpu().numpy()))))
            # print('The angle of {} and {} is {}'.format('1_dif', '2_dif',  str(self.dot_product_angle_np(x_1_dif.cpu().numpy(), x_2_dif.cpu().numpy()))))
            # # inter-dif
            # print('The angle of {} and {} is {}'.format('0_inter', '0_dif',  str(self.dot_product_angle_np(x_0_inter.cpu().numpy(), x_0_dif.cpu().numpy()))))
            # print('The angle of {} and {} is {}'.format('0_inter', '1_dif',  str(self.dot_product_angle_np(x_0_inter.cpu().numpy(), x_1_dif.cpu().numpy()))))
            # print('The angle of {} and {} is {}'.format('0_inter', '2_dif',  str(self.dot_product_angle_np(x_0_inter.cpu().numpy(), x_2_dif.cpu().numpy()))))
            #
            # print('The angle of {} and {} is {}'.format('1_inter', '0_dif', str(self.dot_product_angle_np(x_1_inter.cpu().numpy(), x_0_dif.cpu().numpy()))))
            # print('The angle of {} and {} is {}'.format('1_inter', '1_dif', str(self.dot_product_angle_np(x_1_inter.cpu().numpy(), x_1_dif.cpu().numpy()))))
            # print('The angle of {} and {} is {}'.format('1_inter', '2_dif', str(self.dot_product_angle_np(x_1_inter.cpu().numpy(), x_2_dif.cpu().numpy()))))
            #
            # print('The angle of {} and {} is {}'.format('2_inter', '0_dif', str(self.dot_product_angle_np(x_2_inter.cpu().numpy(), x_0_dif.cpu().numpy()))))
            # print('The angle of {} and {} is {}'.format('2_inter', '1_dif', str(self.dot_product_angle_np(x_2_inter.cpu().numpy(), x_1_dif.cpu().numpy()))))
            # print('The angle of {} and {} is {}'.format('2_inter', '2_dif', str(self.dot_product_angle_np(x_2_inter.cpu().numpy(), x_2_dif.cpu().numpy()))))

            # # calc angle of vectors:torch
            # # inter-inter
            # print('The angle of {} and {} is {}'.format('0_inter', '1_inter', str(self.dot_product_angle(x_0_inter, x_1_inter))))
            # print('The angle of {} and {} is {}'.format('0_inter', '2_inter', str(self.dot_product_angle(x_0_inter, x_2_inter))))
            # print('The angle of {} and {} is {}'.format('1_inter', '2_inter', str(self.dot_product_angle(x_1_inter, x_2_inter))))
            # # dif-dif
            # print('The angle of {} and {} is {}'.format('0_dif', '1_dif', str(self.dot_product_angle(x_0_dif, x_1_dif))))
            # print('The angle of {} and {} is {}'.format('0_dif', '2_dif', str(self.dot_product_angle(x_0_dif, x_2_dif))))
            # print('The angle of {} and {} is {}'.format('1_dif', '2_dif', str(self.dot_product_angle(x_1_dif, x_2_dif))))
            # # inter-dif
            # print('The angle of {} and {} is {}'.format('0_inter', '0_dif', str(self.dot_product_angle(x_0_inter, x_0_dif))))
            # print('The angle of {} and {} is {}'.format('0_inter', '1_dif', str(self.dot_product_angle(x_0_inter, x_1_dif))))
            # print('The angle of {} and {} is {}'.format('0_inter', '2_dif', str(self.dot_product_angle(x_0_inter, x_2_dif))))
            #
            # print('The angle of {} and {} is {}'.format('1_inter', '0_dif', str(self.dot_product_angle(x_1_inter, x_0_dif))))
            # print('The angle of {} and {} is {}'.format('1_inter', '1_dif', str(self.dot_product_angle(x_1_inter, x_1_dif))))
            # print('The angle of {} and {} is {}'.format('1_inter', '2_dif', str(self.dot_product_angle(x_1_inter, x_2_dif))))
            #
            # print('The angle of {} and {} is {}'.format('2_inter', '0_dif', str(self.dot_product_angle(x_2_inter, x_0_dif))))
            # print('The angle of {} and {} is {}'.format('2_inter', '1_dif', str(self.dot_product_angle(x_2_inter, x_1_dif))))
            # print('The angle of {} and {} is {}'.format('2_inter', '2_dif', str(self.dot_product_angle(x_2_inter, x_2_dif))))

            return {'out_prediction': out_prediction,
                    'x_inter_list': x_inter_list,
                    'x_inter_sup_list': x_inter_sup_list,
                    'x_dif_list': x_dif_list,
                    'x_ori_list': x_ori_list,
                    'x_fusion_list': x_fusion_list}

        # # loss
        if self.args.uncertainty == 'T':
            label = self.one_hot_embedding(label, self.num_classes)
            main_cls_loss = self.criterion(out_prediction, label.float(), global_step, self.num_classes,self.args.annealing_step)*self.args.w_main_cls
        elif self.args.uncertainty == 'F':
            main_cls_loss = torch.mean(self.criterion(out_prediction, label))*self.args.w_main_cls

        if self.args.ifaux_inter == 'T':
            out_prediction_inter = self.linear_cls_inter(x_inter)
            aux_inter_cls_loss = torch.mean(self.criterion(out_prediction_inter, label))*self.args.w_inter
        elif self.args.ifaux_inter == 'F':
            aux_inter_cls_loss = torch.tensor([0]).cuda()

        x_sup_dif_list = x_inter_sup_list + x_dif_list
        # contrastive loss
        if self.args.ifconloss == 'T':
            # con_loss = self.contrastive_loss(x_inter_list, x_dif_list)*self.args.w_con
            con_loss = self.contrastive_loss(x_inter_list, x_sup_dif_list)*self.args.w_con
        elif self.args.ifconloss == 'F':
            con_loss = torch.tensor([0]).cuda()
        else:
            print('wrong ifconloss!')

        # sup dif loss (for each modality, compute the loss of sup against inter and against dif)
        if self.args.ifsuploss:
            sup_loss = self.calc_sup_dif_loss(x_sup_dif_list)*self.args.w_sup
        else:
            sup_loss = torch.tensor([0]).cuda()

        # cross recon loss
        if self.args.ifrecon == 'T':
            recon_loss = self.recon_op(x_ori_list, x_inter_list, x_inter_sup_list, x_dif_list)*self.args.w_cross_recon
        elif self.args.ifrecon == 'F':
            recon_loss = torch.tensor([0]).cuda()
        else:
            print('wrong ifrecon!')

        loss = main_cls_loss + con_loss + recon_loss + aux_inter_cls_loss + sup_loss

        return {'out_prediction': out_prediction,
                'loss': loss,
                'main_cls_loss': main_cls_loss,
                'aux_inter_cls_loss': aux_inter_cls_loss,
                'con_loss': con_loss,
                'sup_loss': sup_loss,
                'recon_loss': recon_loss}



if __name__ == '__main__':
    # Smoke test: build the model for the 'brats' path and run one forward
    # pass on a random 4-modality volume.
    from types import SimpleNamespace

    criterion = torch.nn.CrossEntropyLoss(reduction='none')

    # ContrastiveDisFusion reads all configuration from an `args` object
    # (see __init__/forward: self.args.<attr>); it does NOT accept keywords
    # such as dropout_value/model_depth/fusion_type directly — the previous
    # call raised TypeError. Only attributes visible in this file are set
    # here; forward() may read more (e.g. fusion-specific ones).
    args = SimpleNamespace(
        dataset_name='brats',   # 4-modality branch -> modal_num = 4
        model_depth=18,         # selects the resnet18 backbone (dim_out = 512)
        uncertainty='F',        # plain criterion path for the main cls loss
        ifaux_inter='F',        # skip auxiliary inter classification loss
        ifconloss='F',          # skip contrastive loss
        ifsuploss=False,        # skip sup/dif loss
        ifrecon='F',            # skip cross reconstruction loss
        w_main_cls=1.0,
        w_inter=1.0,
        w_con=1.0,
        w_sup=1.0,
        w_cross_recon=1.0,
        # NOTE(review): the original demo also passed fusion_type/cma_type/
        # dropout_value — presumably consumed elsewhere in the model; kept
        # here so that code still finds them. Confirm against forward().
        fusion_type='cma',
        cma_type='max',
        dropout_value=0.5,
    )

    model = ContrastiveDisFusion(args=args, criterion=criterion, num_classes=3)
    # model.cuda()
    # print(model)

    # Count trainable parameters (materialize the list: filter is one-shot).
    trainable = [p for p in model.parameters() if p.requires_grad]
    print('trainable parameters:', sum(int(np.prod(p.size())) for p in trainable))

    # One random batch: (batch, modalities, H, W, D) for the brats path.
    # Renamed from `all`, which shadowed the builtin.
    inputs = torch.rand(size=(1, 4, 128, 128, 100))
    label = torch.tensor([0])
    # inputs = inputs.cuda()
    # label = label.cuda()

    out = model(inputs, label)
    print(out['out_prediction'])