"""
    Author: Xiaoyu He
    Data: 2021-01-31 21:43
    Reference: DANet
    https://blog.csdn.net/qq_38343151/article/details/106744207
"""


import os
import torch
import torch.nn as nn
from torch.nn import functional as F
import torchsummary
from resnet import resnet18, resnet34, resnet50, resnet101
from configDANet2 import Config as cf
from torch.nn import Parameter, Module
import warnings

# print(torch.version.cuda)

warnings.filterwarnings('ignore')

class CMA_None(Module):
    """No-op cross-modality module: both streams pass through untouched."""

    def __init__(self, in_dim):
        super(CMA_None, self).__init__()
        # kept only for interface parity with the other CMA_* modules
        self.chanel_in = in_dim

    def forward(self, derm, clinic):
        # no interaction between the two modalities
        return derm, clinic


class CMA_CBAM(Module):
    """CBAM-style spatial cross-modality attention.

    Each modality's channel-pooled (mean + max) map is reduced to a single
    score channel by a 7x7 conv; a softmax across the two score channels
    yields per-modality spatial weights applied to the respective stream.
    """

    def __init__(self, in_dim):
        super(CMA_CBAM, self).__init__()
        self.chanel_in = in_dim
        self.conv_derm = nn.Conv2d(2, 1, kernel_size=7, stride=1, padding=3, bias=False)
        self.conv_clinic = nn.Conv2d(2, 1, kernel_size=7, stride=1, padding=3, bias=False)
        # currently unused; kept so existing checkpoints still load
        # (they belong to the commented-out residual variant)
        self.para_d = Parameter(torch.zeros(1))
        self.para_c = Parameter(torch.zeros(1))

    def _score(self, conv, feat):
        # channel-wise avg + max pooling -> (B, 2, H, W) -> 1-channel score
        pooled = torch.cat(
            [torch.mean(feat, dim=1, keepdim=True),
             torch.max(feat, dim=1, keepdim=True)[0]],
            dim=1,
        )
        return conv(pooled)

    def forward(self, derm, clinic):
        scores = torch.cat(
            [self._score(self.conv_derm, derm),
             self._score(self.conv_clinic, clinic)],
            dim=1,
        )
        weights = F.softmax(scores, dim=1)

        derm_refined = weights[:, 0:1, :, :] * derm
        # derm_refined = self.para_d * out_derm + derm

        clinic_refined = weights[:, 1:2, :, :] * clinic
        # clinic_refined = self.para_c * out_clinic + clinic

        return derm_refined, clinic_refined


class CMA_CrossCBAM(Module):
    """Cross-gated CBAM variant: each modality is modulated by the *other*
    modality's sigmoid spatial-attention map (3x3 conv over mean/max pooled
    channels)."""

    def __init__(self, in_dim):
        super(CMA_CrossCBAM, self).__init__()
        self.chanel_in = in_dim
        self.conv_derm = nn.Conv2d(2, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv_clinic = nn.Conv2d(2, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.sigmoid = nn.Sigmoid()

    def _gate(self, conv, feat):
        # channel-wise avg + max pooling -> 2 channels -> 1-channel gate logit
        pooled = torch.cat(
            [torch.mean(feat, dim=1, keepdim=True),
             torch.max(feat, dim=1, keepdim=True)[0]],
            dim=1,
        )
        return self.sigmoid(conv(pooled))

    def forward(self, derm, clinic):
        derm_gate = self._gate(self.conv_derm, derm)
        clinic_gate = self._gate(self.conv_clinic, clinic)
        # cross application: the clinic gate scales derm and vice versa
        return clinic_gate * derm, derm_gate * clinic


class CMA_CrossAndSelfCBAM(Module):
    """Combined self- and cross-modality CBAM attention.

    Each modality produces two sigmoid spatial gates from its mean/max
    channel-pooled map (one for itself, one for the other stream). The
    refined output is the self-gated stream plus a learnable-scaled
    cross-gated term (para_* start at 0, so cross terms are off at init).
    """

    def __init__(self, in_dim):
        super(CMA_CrossAndSelfCBAM, self).__init__()
        self.chanel_in = in_dim
        self.conv_d2d = nn.Conv2d(2, 1, kernel_size=7, stride=1, padding=3, bias=False)
        self.conv_d2c = nn.Conv2d(2, 1, kernel_size=7, stride=1, padding=3, bias=False)
        self.conv_c2c = nn.Conv2d(2, 1, kernel_size=7, stride=1, padding=3, bias=False)
        self.conv_c2d = nn.Conv2d(2, 1, kernel_size=7, stride=1, padding=3, bias=False)
        # learnable weights for the cross terms, zero-initialised
        self.para_d2c = Parameter(torch.zeros(1))
        self.para_c2d = Parameter(torch.zeros(1))
        self.sigmoid = nn.Sigmoid()

    def forward(self, derm, clinic):
        # FIX: the original computed each modality's mean/max pooled map
        # twice (derm_a_d == derm_a_c etc.); compute it once and share it
        # between the self- and cross-branches — mathematically identical.
        derm_pool = torch.cat(
            [torch.mean(derm, dim=1, keepdim=True),
             torch.max(derm, dim=1, keepdim=True)[0]],
            dim=1,
        )
        clinic_pool = torch.cat(
            [torch.mean(clinic, dim=1, keepdim=True),
             torch.max(clinic, dim=1, keepdim=True)[0]],
            dim=1,
        )

        out_d2d = self.sigmoid(self.conv_d2d(derm_pool)) * derm
        out_d2c = self.sigmoid(self.conv_d2c(derm_pool)) * clinic
        out_c2c = self.sigmoid(self.conv_c2c(clinic_pool)) * clinic
        out_c2d = self.sigmoid(self.conv_c2d(clinic_pool)) * derm

        derm_refined = out_d2d + self.para_c2d * out_c2d
        clinic_refined = out_c2c + self.para_d2c * out_d2c

        return derm_refined, clinic_refined


class CMA_DANet(Module):
    """DANet-style position attention with a shared ("public") query.

    A 3x3 conv fuses the concatenated streams into a public feature map
    whose query attends per-modality keys; each stream receives a
    gamma-scaled residual attention output. gamma_* start at zero, so the
    module is an identity mapping at initialisation.
    """

    def __init__(self, in_dim):
        super(CMA_DANet, self).__init__()
        self.chanel_in = in_dim

        self.conv_3x3 = nn.Conv2d(2*in_dim, in_dim, kernel_size=3, stride=1, padding=1, bias=False)
        self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)

        self.key_conv_d = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
        self.key_conv_c = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
        self.value_conv_d = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        self.value_conv_c = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        self.gamma_d = Parameter(torch.zeros(1))
        self.gamma_c = Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, derm, clinic):
        public = self.conv_3x3(torch.cat([derm, clinic], dim=1))

        B, C, H, W = derm.size()
        N = H * W
        proj_query = self.query_conv(public).view(B, -1, N).permute(0, 2, 1)

        # BUG FIX: the original called self.softmax_d / self.softmax_c,
        # which were never defined in __init__ (only self.softmax is),
        # raising AttributeError on the first forward pass.
        proj_key_derm = self.key_conv_d(derm).view(B, -1, N)
        attention_derm = self.softmax(torch.bmm(proj_query, proj_key_derm))
        proj_value_derm = self.value_conv_d(derm).view(B, -1, N)
        out_derm = torch.bmm(proj_value_derm, attention_derm.permute(0, 2, 1))  # batched matrix multiply
        out_derm = out_derm.view(B, C, H, W)
        derm_refined = self.gamma_d * out_derm + derm

        proj_key_clinic = self.key_conv_c(clinic).view(B, -1, N)
        attention_clinic = self.softmax(torch.bmm(proj_query, proj_key_clinic))
        proj_value_clinic = self.value_conv_c(clinic).view(B, -1, N)
        out_clinic = torch.bmm(proj_value_clinic, attention_clinic.permute(0, 2, 1))
        out_clinic = out_clinic.view(B, C, H, W)
        clinic_refined = self.gamma_c * out_clinic + clinic

        return derm_refined, clinic_refined


class CMA_FuseDANet(Module):
    """DANet-style position attention whose shared query comes from a
    learnable weighted sum of the two streams (fuse_d/fuse_c, zero-init).

    Each modality receives a gamma-scaled residual attention output;
    gamma_* start at zero, so the module is an identity at initialisation.
    """

    def __init__(self, in_dim):
        super(CMA_FuseDANet, self).__init__()
        self.chanel_in = in_dim

        self.fuse_d = Parameter(torch.zeros(1))
        self.fuse_c = Parameter(torch.zeros(1))
        self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)

        self.key_conv_d = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
        self.key_conv_c = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
        self.value_conv_d = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        self.value_conv_c = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        self.gamma_d = Parameter(torch.zeros(1))
        self.gamma_c = Parameter(torch.zeros(1))
        self.softmax_d = nn.Softmax(dim=-1)
        self.softmax_c = nn.Softmax(dim=-1)

    def _attend(self, query, key_conv, value_conv, softmax, feat):
        # standard position attention: softmax(Q K) applied to V, then
        # reshaped back to the spatial layout of `feat`
        b, c, h, w = feat.size()
        n = h * w
        key = key_conv(feat).view(b, -1, n)
        att = softmax(torch.bmm(query, key))
        value = value_conv(feat).view(b, -1, n)
        return torch.bmm(value, att.permute(0, 2, 1)).view(b, c, h, w)

    def forward(self, derm, clinic):
        # shared query from a learnable blend of the two modalities
        shared = self.fuse_d * derm + self.fuse_c * clinic
        b, _, h, w = derm.size()
        query = self.query_conv(shared).view(b, -1, h * w).permute(0, 2, 1)

        out_d = self._attend(query, self.key_conv_d, self.value_conv_d, self.softmax_d, derm)
        out_c = self._attend(query, self.key_conv_c, self.value_conv_c, self.softmax_c, clinic)

        return self.gamma_d * out_d + derm, self.gamma_c * out_c + clinic


class CMA_CrossDANet(Module):
    """Cross-attention DANet variant: each modality's keys/values are
    attended with the *other* modality's query.

    Outputs are gamma-scaled residuals; gamma_* start at zero, so the
    module is an identity at initialisation.
    """

    def __init__(self, in_dim):
        super(CMA_CrossDANet, self).__init__()
        self.chanel_in = in_dim

        self.query_conv_d = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
        self.query_conv_c = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
        self.key_conv_d = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
        self.key_conv_c = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
        self.value_conv_d = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        self.value_conv_c = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        self.gamma_d = Parameter(torch.zeros(1))
        self.gamma_c = Parameter(torch.zeros(1))
        self.softmax_d = nn.Softmax(dim=-1)
        self.softmax_c = nn.Softmax(dim=-1)

    def _attend(self, query, key_conv, value_conv, softmax, feat):
        # position attention over `feat` driven by an external query
        b, c, h, w = feat.size()
        n = h * w
        key = key_conv(feat).view(b, -1, n)
        att = softmax(torch.bmm(query, key))
        value = value_conv(feat).view(b, -1, n)
        return torch.bmm(value, att.permute(0, 2, 1)).view(b, c, h, w)

    def forward(self, derm, clinic):
        b, _, h, w = derm.size()
        n = h * w
        query_d = self.query_conv_d(derm).view(b, -1, n).permute(0, 2, 1)
        query_c = self.query_conv_c(clinic).view(b, -1, n).permute(0, 2, 1)

        # queries are swapped: the clinic query attends derm features,
        # and the derm query attends clinic features
        out_d = self._attend(query_c, self.key_conv_d, self.value_conv_d, self.softmax_d, derm)
        out_c = self._attend(query_d, self.key_conv_c, self.value_conv_c, self.softmax_c, clinic)

        return self.gamma_d * out_d + derm, self.gamma_c * out_c + clinic


class CMA_CrossAndSelfDANet(Module):
    """Combined self- and cross-modality DANet position attention.

    Four attention branches (d2d, c2d, c2c, d2c) each have their own
    query/key/value convs and a zero-initialised gamma, so the module is
    an identity at initialisation. Naming: "x2y" means the query comes
    from x while keys/values (and the output target) come from y.
    """

    def __init__(self, in_dim):
        super(CMA_CrossAndSelfDANet, self).__init__()
        self.chanel_in = in_dim

        self.query_conv_d2d = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
        self.query_conv_d2c = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
        self.query_conv_c2c = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
        self.query_conv_c2d = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)

        self.key_conv_d2d = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
        self.key_conv_d2c = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
        self.key_conv_c2c = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
        self.key_conv_c2d = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)

        self.value_conv_d2d = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        self.value_conv_d2c = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        self.value_conv_c2c = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        self.value_conv_c2d = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)

        self.gamma_d2d = Parameter(torch.zeros(1))
        self.gamma_d2c = Parameter(torch.zeros(1))
        self.gamma_c2c = Parameter(torch.zeros(1))
        self.gamma_c2d = Parameter(torch.zeros(1))

        self.softmax = nn.Softmax(dim=-1)

    def _attend(self, q_conv, k_conv, v_conv, q_src, kv_src):
        # one attention branch: query from q_src, keys/values from kv_src
        b, c, h, w = kv_src.size()
        n = h * w
        q = q_conv(q_src).view(b, -1, n).permute(0, 2, 1)
        k = k_conv(kv_src).view(b, -1, n)
        att = self.softmax(torch.bmm(q, k))
        v = v_conv(kv_src).view(b, -1, n)
        return torch.bmm(v, att.permute(0, 2, 1)).view(b, c, h, w)

    def forward(self, derm, clinic):
        # derm stream: self-attention (d2d) + cross-attention from clinic (c2d)
        out_d2d = self._attend(self.query_conv_d2d, self.key_conv_d2d,
                               self.value_conv_d2d, derm, derm)
        out_c2d = self._attend(self.query_conv_c2d, self.key_conv_c2d,
                               self.value_conv_c2d, clinic, derm)
        derm_refined = self.gamma_d2d * out_d2d + self.gamma_c2d * out_c2d + derm

        # clinic stream: self-attention (c2c) + cross-attention from derm (d2c)
        out_c2c = self._attend(self.query_conv_c2c, self.key_conv_c2c,
                               self.value_conv_c2c, clinic, clinic)
        out_d2c = self._attend(self.query_conv_d2c, self.key_conv_d2c,
                               self.value_conv_d2c, derm, clinic)
        clinic_refined = self.gamma_c2c * out_c2c + self.gamma_d2c * out_d2c + clinic

        return derm_refined, clinic_refined


class CMA_CPFNet(Module):
    """CPFNet-style cross-modality attention: a Fusion block maps the
    concatenated streams to two score channels, which are softmaxed into
    per-modality spatial weights."""

    def __init__(self, in_dim):
        super(CMA_CPFNet, self).__init__()
        self.fusion = Fusion(in_dim)

    def forward(self, derm, clinic):
        scores = self.fusion(torch.cat([derm, clinic], dim=1))
        weights = F.softmax(scores, dim=1)
        # channel 0 weights derm, channel 1 weights clinic
        return weights[:, 0:1, :, :] * derm, weights[:, 1:2, :, :] * clinic


class CMA_CPFNetSplit(Module):
    """Split variant of the CPFNet attention: each modality has its own
    Fusion21c reducer producing a 1-channel score; the two scores are
    softmaxed jointly into per-modality spatial weights."""

    def __init__(self, in_dim):
        super(CMA_CPFNetSplit, self).__init__()
        self.mlp21c_derm = Fusion21c(in_dim)
        self.mlp21c_clinic = Fusion21c(in_dim)

    def forward(self, derm, clinic):
        scores = torch.cat(
            [self.mlp21c_derm(derm), self.mlp21c_clinic(clinic)], dim=1)
        weights = F.softmax(scores, dim=1)
        # channel 0 weights derm, channel 1 weights clinic
        return weights[:, 0:1, :, :] * derm, weights[:, 1:2, :, :] * clinic


class Fusion_CentralNet(Module):
    """CentralNet-style fusion: a learnable weighted sum of the modality
    streams (plus the previous hyper stream after the first stage).
    All weights are zero-initialised."""

    def __init__(self, in_dim):
        super(Fusion_CentralNet, self).__init__()
        self.chanel_in = in_dim

        self.gamma_d = Parameter(torch.zeros(1))
        self.gamma_c = Parameter(torch.zeros(1))
        self.gamma_h = Parameter(torch.zeros(1))

    def forward(self, derm, clinic, hyper, is_first):
        fused = self.gamma_d * derm + self.gamma_c * clinic
        if not is_first:
            # carry forward the previous hyper stream as well
            fused = fused + self.gamma_h * hyper
        return fused


class Fusion_AddCatSE(Module):
    """Add the two modality streams, merge with the hyper stream (when one
    exists), then apply a squeeze-and-excitation channel gate.

    With is_first=True there is no hyper stream yet, so the gate acts on
    derm + clinic alone.
    """

    def __init__(self, in_dim):
        super(Fusion_AddCatSE, self).__init__()
        self.chanel_in = in_dim

        self.conv_3x3 = nn.Conv2d(2*in_dim, in_dim, kernel_size=3, stride=1, padding=1, bias=False)
        # SE bottleneck: in_dim -> in_dim//16 -> in_dim
        self.fc1 = nn.Conv2d(in_dim, in_dim//16, kernel_size=1)
        self.fc2 = nn.Conv2d(in_dim//16, in_dim, kernel_size=1)

    def forward(self, derm, clinic, hyper, is_first):
        fusion = derm + clinic
        if not is_first:
            fusion = torch.cat([fusion, hyper], dim=1)
            fusion = self.conv_3x3(fusion)
        # FIX: global average pool via adaptive pooling — identical to the
        # original F.avg_pool2d(fusion, fusion.size(2)) for square maps,
        # but also correct when H != W
        w = F.adaptive_avg_pool2d(fusion, 1)
        w = F.relu(self.fc1(w))
        # FIX: torch.sigmoid replaces the deprecated F.sigmoid
        w = torch.sigmoid(self.fc2(w))
        return w * fusion


class Fusion_AddCat(Module):
    """Add the modality streams; after the first stage, concatenate with the
    hyper stream and reduce back to in_dim channels with a 3x3 conv."""

    def __init__(self, in_dim):
        super(Fusion_AddCat, self).__init__()
        self.chanel_in = in_dim

        self.conv_3x3 = nn.Conv2d(2*in_dim, in_dim, kernel_size=3, stride=1, padding=1, bias=False)
        # fc1/fc2 are never used in forward; kept so existing checkpoints
        # (which contain their parameters) still load
        self.fc1 = nn.Conv2d(in_dim, in_dim//16, kernel_size=1)
        self.fc2 = nn.Conv2d(in_dim//16, in_dim, kernel_size=1)

    def forward(self, derm, clinic, hyper, is_first):
        fused = derm + clinic
        if not is_first:
            fused = self.conv_3x3(torch.cat([fused, hyper], dim=1))
        return fused


class Fushion_Cat(Module):
    """Concatenation fusion: stack the available streams along channels and
    reduce to in_dim with a 3x3 conv (two- or three-stream variant depending
    on whether a hyper stream exists yet)."""

    def __init__(self, in_dim):
        super(Fushion_Cat, self).__init__()
        self.chanel_in = in_dim

        self.conv_3x3_3 = nn.Conv2d(3*in_dim, in_dim, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv_3x3_2 = nn.Conv2d(2*in_dim, in_dim, kernel_size=3, stride=1, padding=1, bias=False)

    def forward(self, derm, clinic, hyper, is_first):
        if is_first:
            streams, conv = [derm, clinic], self.conv_3x3_2
        else:
            streams, conv = [derm, clinic, hyper], self.conv_3x3_3
        return conv(torch.cat(streams, dim=1))


class PredictLayer(Module):
    """Single-task classification head: in_dim -> 256 -> output_dim MLP
    with ReLU and dropout(0.4) between the linear layers."""

    def __init__(self, in_dim, output_dim):
        super(PredictLayer, self).__init__()

        layers = [
            nn.Linear(in_dim, 256),
            nn.ReLU(),
            nn.Dropout(0.4),
            nn.Linear(256, output_dim),
        ]
        self.predict_layer = nn.Sequential(*layers)

    def forward(self, x):
        return self.predict_layer(x)


class MultiTaskPredictHead(Module):
    """Eight parallel PredictLayer heads, one per labelled task
    (comment below gives the task order used throughout)."""

    def __init__(self, in_dim):
        super(MultiTaskPredictHead, self).__init__()
        # PN SR PIG RS DaG BWV VS Diag
        self.number_of_classes = [3, 3, 3, 2, 3, 2, 3, 5]
        # attributes are named predict_1 .. predict_8 (not a ModuleList)
        # to keep state_dict keys stable
        for idx, n_cls in enumerate(self.number_of_classes, start=1):
            setattr(self, 'predict_{}'.format(idx), PredictLayer(in_dim, n_cls))

    def forward(self, x):
        # one logit tensor per task, in declaration order
        return [getattr(self, 'predict_{}'.format(idx))(x)
                for idx in range(1, 9)]


class Fusion(nn.Module):
    """Reduce a 2*a-channel map to 2 channels through three convs
    (1x1 then two 3x3), with ReLU after the first two; the final conv
    output is left un-activated."""

    def __init__(self, a):
        super(Fusion, self).__init__()

        self.conv1 = nn.Conv2d(in_channels=2*a, out_channels=a, dilation=1, kernel_size=1, padding=0)
        self.conv2 = nn.Conv2d(in_channels=a, out_channels=a//2, dilation=1, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(in_channels=a//2, out_channels=2, dilation=1, kernel_size=3, padding=1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.relu(self.conv1(x))
        out = self.relu(self.conv2(out))
        return self.conv3(out)


class Fusion21c(nn.Module):
    """Reduce an a-channel map to a single channel through three convs
    (1x1 then two 3x3), with ReLU after the first two; the final conv
    output is left un-activated."""

    def __init__(self, a):
        super(Fusion21c, self).__init__()

        self.conv1 = nn.Conv2d(in_channels=a, out_channels=a//2, dilation=1, kernel_size=1, padding=0)
        self.conv2 = nn.Conv2d(in_channels=a//2, out_channels=a//4, dilation=1, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(in_channels=a//4, out_channels=1, dilation=1, kernel_size=3, padding=1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.relu(self.conv1(x))
        out = self.relu(self.conv2(out))
        return self.conv3(out)


class BaseNet(nn.Module):
    """Two-stream (clinical + dermoscopic) multi-task network with a fused
    "hyper" stream.

    Each modality runs its own ResNet backbone. After each residual stage a
    CMA module exchanges information between the streams and a fusion module
    updates the shared hyper stream (the hyper backbone's stem/layer1 are
    never used — the hyper stream is created after stage 1). Four
    MultiTaskPredictHead heads predict from the derm, clinic, hyper and
    concatenated features respectively.
    """

    def __init__(self):
        super(BaseNet, self).__init__()
        self.network = cf.network

        # backbone constructor and per-stage channel counts, keyed by name
        backbones = {
            "resnet18": (resnet18, [64, 64, 128, 256, 512]),
            "resnet34": (resnet34, [64, 64, 128, 256, 512]),
            "resnet50": (resnet50, [64, 256, 512, 1024, 2048]),
            "resnet101": (resnet101, [64, 256, 512, 1024, 2048]),
        }
        if self.network not in backbones:
            raise ValueError("unsupported network: {}".format(self.network))
        make_backbone, self.in_c = backbones[self.network]
        self.backbone_derm = make_backbone(pretrained=True)
        self.backbone_clinic = make_backbone(pretrained=True)
        self.backbone_hyper = make_backbone(pretrained=True)

        # one cross-modality attention module per residual stage
        cma_classes = {
            'cpfnet': CMA_CPFNet,
            'cpfnetsplit': CMA_CPFNetSplit,
            'danet': CMA_DANet,
            'fuseda': CMA_FuseDANet,
            'crossda': CMA_CrossDANet,
            'crossandselfda': CMA_CrossAndSelfDANet,
            'cbam': CMA_CBAM,
            'crosscbam': CMA_CrossCBAM,
            'crossandselfcbam': CMA_CrossAndSelfCBAM,
            'none': CMA_None,
        }
        if cf.cma_method not in cma_classes:
            # FIX: the original printed an error and continued, leaving the
            # cma_module_* attributes undefined and crashing later with a
            # confusing AttributeError — fail fast instead
            raise ValueError("unknown cma_method: {}".format(cf.cma_method))
        cma_cls = cma_classes[cf.cma_method]
        self.cma_module_1 = cma_cls(self.in_c[1])
        self.cma_module_2 = cma_cls(self.in_c[2])
        self.cma_module_3 = cma_cls(self.in_c[3])
        self.cma_module_4 = cma_cls(self.in_c[4])

        # one hyper-stream fusion module per residual stage
        fusion_classes = {
            'addcatse': Fusion_AddCatSE,
            'addcat': Fusion_AddCat,
            'centralnet': Fusion_CentralNet,
            'cat': Fushion_Cat,
        }
        if cf.fusion_method not in fusion_classes:
            raise ValueError("unknown fusion_method: {}".format(cf.fusion_method))
        fusion_cls = fusion_classes[cf.fusion_method]
        self.fusion_module_1 = fusion_cls(self.in_c[1])
        self.fusion_module_2 = fusion_cls(self.in_c[2])
        self.fusion_module_3 = fusion_cls(self.in_c[3])
        self.fusion_module_4 = fusion_cls(self.in_c[4])

        self.head_derm = MultiTaskPredictHead(self.in_c[4])
        self.head_clinic = MultiTaskPredictHead(self.in_c[4])
        self.head_hyper = MultiTaskPredictHead(self.in_c[4])
        self.head_fusion = MultiTaskPredictHead(3*self.in_c[4])

    def forward(self, clinic, derm):
        """Forward pass.  NOTE the argument order: (clinic, derm).

        Returns (output_clinic, output_derm, output_hyper, output_fusion),
        each a list of eight per-task logit tensors.
        """
        # --- stage 1: stems + layer1 of the two image backbones ---
        derm = self.backbone_derm.conv1(derm)
        derm = self.backbone_derm.bn1(derm)
        derm = self.backbone_derm.relu(derm)
        derm = self.backbone_derm.maxpool(derm)
        derm_1 = self.backbone_derm.layer1(derm)

        clinic = self.backbone_clinic.conv1(clinic)
        clinic = self.backbone_clinic.bn1(clinic)
        clinic = self.backbone_clinic.relu(clinic)
        clinic = self.backbone_clinic.maxpool(clinic)
        clinic_1 = self.backbone_clinic.layer1(clinic)

        derm_1, clinic_1 = self.cma_module_1(derm_1, clinic_1)
        # the first fusion creates the hyper stream (no previous hyper)
        hyper_1 = self.fusion_module_1(derm_1, clinic_1, None, is_first=True)

        # --- stage 2 ---
        derm_2 = self.backbone_derm.layer2(derm_1)
        clinic_2 = self.backbone_clinic.layer2(clinic_1)
        hyper_2 = self.backbone_hyper.layer2(hyper_1)

        derm_2, clinic_2 = self.cma_module_2(derm_2, clinic_2)
        hyper_2 = self.fusion_module_2(derm_2, clinic_2, hyper_2, is_first=False)

        # --- stage 3 ---
        derm_3 = self.backbone_derm.layer3(derm_2)
        clinic_3 = self.backbone_clinic.layer3(clinic_2)
        hyper_3 = self.backbone_hyper.layer3(hyper_2)

        derm_3, clinic_3 = self.cma_module_3(derm_3, clinic_3)
        hyper_3 = self.fusion_module_3(derm_3, clinic_3, hyper_3, is_first=False)

        # --- stage 4 ---
        derm_4 = self.backbone_derm.layer4(derm_3)
        clinic_4 = self.backbone_clinic.layer4(clinic_3)
        hyper_4 = self.backbone_hyper.layer4(hyper_3)

        derm_4, clinic_4 = self.cma_module_4(derm_4, clinic_4)
        hyper_4 = self.fusion_module_4(derm_4, clinic_4, hyper_4, is_first=False)

        # --- global average pool each stream and predict ---
        derm = F.adaptive_avg_pool2d(derm_4, (1, 1))
        derm = torch.flatten(derm, 1)
        output_derm = self.head_derm(derm)

        clinic = F.adaptive_avg_pool2d(clinic_4, (1, 1))
        clinic = torch.flatten(clinic, 1)
        output_clinic = self.head_clinic(clinic)

        hyper = F.adaptive_avg_pool2d(hyper_4, (1, 1))
        hyper = torch.flatten(hyper, 1)
        output_hyper = self.head_hyper(hyper)

        fusion = torch.cat([derm, clinic, hyper], dim=1)
        output_fusion = self.head_fusion(fusion)

        return output_clinic, output_derm, output_hyper, output_fusion


if __name__ == '__main__':
    print(cf.this_time_log)
    # FIX: select the GPU *before* constructing the model; CUDA reads
    # CUDA_VISIBLE_DEVICES when the CUDA context is first created, so
    # setting it after torch may have touched the GPU has no effect
    os.environ["CUDA_VISIBLE_DEVICES"] = "2"
    model = BaseNet()
    model.cuda()
    torchsummary.summary(model, [(3, 224, 224), (3, 224, 224)])
