import torch
import torch.nn as nn
from sympy.physics.pring import energy
from torch.nn import init
from resnet import resnet50, resnet18
import torch.nn.functional as F


class Normalize(nn.Module):
    """L-p normalization over the channel dimension (dim 1).

    Divides each sample's feature vector by its p-norm so the output
    has unit p-norm per sample.
    """

    def __init__(self, power=2):
        super(Normalize, self).__init__()
        self.power = power

    def forward(self, x):
        # ||x||_p along dim 1, kept as a singleton dim so division broadcasts.
        p_norm = x.pow(self.power).sum(1, keepdim=True).pow(1.0 / self.power)
        return x / p_norm


# #####################################################################
def weights_init_kaiming(m):
    """Kaiming-style initialisation, dispatched on the layer class name.

    Intended to be used via ``nn.Module.apply``: conv weights get fan-in
    Kaiming normal, linear layers get fan-out Kaiming normal with zero
    bias, and BatchNorm1d gets a near-one scale and zero shift.
    """
    layer_name = m.__class__.__name__
    if 'Conv' in layer_name:
        # Fan-in preserves the variance of activations in the forward pass.
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif 'Linear' in layer_name:
        # Fan-out preserves the variance of gradients in the backward pass.
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_out')
        init.zeros_(m.bias.data)
    elif 'BatchNorm1d' in layer_name:
        # Scale close to 1 with a little noise, shift at exactly 0.
        init.normal_(m.weight.data, 1.0, 0.01)
        init.zeros_(m.bias.data)


def weights_init_classifier(m):
    """Initialise classifier layers: small-variance normal weights, zero bias.

    Intended to be used via ``nn.Module.apply``; only Linear layers are
    touched, everything else is left unchanged.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        init.normal_(m.weight.data, 0, 0.001)
        # Fix: `if m.bias:` evaluated tensor truthiness, which raises a
        # RuntimeError for any bias with more than one element.  Test for
        # the presence of a bias explicitly instead.
        if m.bias is not None:
            init.zeros_(m.bias.data)


class visible_module(nn.Module):
    """Modality-specific stem for visible (RGB) images.

    Runs only the ResNet stem (conv1 / bn1 / relu / maxpool); the shared
    trunk continues in ``base_resnet``.  NOTE(review): ``arch`` is
    accepted but currently ignored — a resnet50 is always built.
    """

    def __init__(self, arch='resnet50'):
        super(visible_module, self).__init__()
        # avg pooling to global pooling happens later in base_resnet.
        self.visible = resnet50(pretrained=True,
                                last_conv_stride=1, last_conv_dilation=1)

    def forward(self, x):
        stem = self.visible
        for op in (stem.conv1, stem.bn1, stem.relu, stem.maxpool):
            x = op(x)
        return x


class thermal_module(nn.Module):
    """Modality-specific stem for thermal (infrared) images.

    Runs only the ResNet stem (conv1 / bn1 / relu / maxpool); the shared
    trunk continues in ``base_resnet``.  NOTE(review): ``arch`` is
    accepted but currently ignored — a resnet50 is always built.
    """

    def __init__(self, arch='resnet50'):
        super(thermal_module, self).__init__()
        # avg pooling to global pooling happens later in base_resnet.
        self.thermal = resnet50(pretrained=True,
                                last_conv_stride=1, last_conv_dilation=1)

    def forward(self, x):
        stem = self.thermal
        for op in (stem.conv1, stem.bn1, stem.relu, stem.maxpool):
            x = op(x)
        return x


class base_resnet(nn.Module):
    """Shared ResNet-50 trunk (layer1..layer4) used by both modalities.

    NOTE(review): ``arch`` is accepted but currently ignored — a resnet50
    is always built.  ``forward`` runs all four stages, although callers
    in this file also access the individual ``self.base.layerN`` stages
    directly.
    """

    def __init__(self, arch='resnet50'):
        super(base_resnet, self).__init__()
        backbone = resnet50(pretrained=True,
                            last_conv_stride=1, last_conv_dilation=1)
        # Replace the fixed average pooling with global adaptive pooling.
        backbone.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.base = backbone

    def forward(self, x):
        for stage in (self.base.layer1, self.base.layer2,
                      self.base.layer3, self.base.layer4):
            x = stage(x)
        return x


class DEE_module(nn.Module):
    def __init__(self, channel, reduction=16):
        super(DEE_module, self).__init__()
        # 这三个空洞卷积将输入的通道数变为原来的四分之一，宽高不变
        self.FC11 = nn.Conv2d(channel, channel // 4, kernel_size=3, stride=1, padding=1, bias=False, dilation=1)
        self.FC11.apply(weights_init_kaiming)
        self.FC12 = nn.Conv2d(channel, channel // 4, kernel_size=3, stride=1, padding=2, bias=False, dilation=2)
        self.FC12.apply(weights_init_kaiming)
        self.FC13 = nn.Conv2d(channel, channel // 4, kernel_size=3, stride=1, padding=3, bias=False, dilation=3)
        self.FC13.apply(weights_init_kaiming)
        # 恢复到原来的通道数，宽高不变
        self.FC1 = nn.Conv2d(channel // 4, channel, kernel_size=1)
        self.FC1.apply(weights_init_kaiming)

        self.FC21 = nn.Conv2d(channel, channel // 4, kernel_size=3, stride=1, padding=1, bias=False, dilation=1)
        self.FC21.apply(weights_init_kaiming)
        self.FC22 = nn.Conv2d(channel, channel // 4, kernel_size=3, stride=1, padding=2, bias=False, dilation=2)
        self.FC22.apply(weights_init_kaiming)
        self.FC23 = nn.Conv2d(channel, channel // 4, kernel_size=3, stride=1, padding=3, bias=False, dilation=3)
        self.FC23.apply(weights_init_kaiming)
        self.FC2 = nn.Conv2d(channel // 4, channel, kernel_size=1)
        self.FC2.apply(weights_init_kaiming)
        self.dropout = nn.Dropout(p=0.01)

    def forward(self, x):
        x1 = (self.FC11(x) + self.FC12(x) + self.FC13(x)) / 3
        x1 = self.FC1(F.relu(x1))
        x2 = (self.FC21(x) + self.FC22(x) + self.FC23(x)) / 3
        x2 = self.FC2(F.relu(x2))
        out = torch.cat((x, x1, x2), 0)
        out = self.dropout(out)
        return out


class TeacherChannelAttention(nn.Module):
    """Cross-modal channel attention (teacher side).

    Queries come from the auxiliary modality; keys and values come from
    the main modality.  The output projection ends in a BatchNorm whose
    affine parameters are zero-initialised, so the module initially emits
    zeros — the caller adds the result as a residual.
    """

    def __init__(self, C, reduction_ratio=1):
        super(TeacherChannelAttention, self).__init__()
        self.C = C
        self.reduction_ratio = reduction_ratio
        reduced = C // reduction_ratio
        # 1x1 projections: value (g), query (theta), key (phi).
        self.g = nn.Conv2d(C, reduced, kernel_size=1, stride=1, padding=0)
        self.theta = nn.Conv2d(C, reduced, kernel_size=1, stride=1, padding=0)
        self.phi = nn.Conv2d(C, reduced, kernel_size=1, stride=1, padding=0)
        self.W = nn.Sequential(
            nn.Conv2d(reduced, C, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(C),
        )
        # Zero-init the BN affine so the block starts as a no-op residual.
        nn.init.constant_(self.W[1].weight, 0.0)
        nn.init.constant_(self.W[1].bias, 0.0)

    def forward(self, main_feat, aux_feat):
        """Return the channel-attention response for ``main_feat``.

        Args:
            main_feat: main-modality features [B, C, H, W].
            aux_feat:  auxiliary-modality features [B, C, H, W].
        Returns:
            Attention response [B, C, H, W] (residual is added by the caller).
        """
        B, _, H, W = main_feat.size()
        reduced = self.C // self.reduction_ratio
        q = self.theta(aux_feat).view(B, reduced, -1)                   # [B, C/r, HW]
        k = self.phi(main_feat).view(B, reduced, -1).permute(0, 2, 1)   # [B, HW, C/r]
        v = self.g(main_feat).view(B, reduced, -1)                      # [B, C/r, HW]
        # Channel-to-channel affinity with scaled dot product.
        scores = torch.matmul(q, k) / (reduced ** 0.5)                  # [B, C/r, C/r]
        weights = F.softmax(scores, dim=-1)
        out = torch.matmul(weights, v).view(B, reduced, H, W)
        return self.W(out)


class TeacherSpatialAttention(nn.Module):
    """Cross-modal spatial attention (teacher side).

    Both feature maps are downsampled by ``reduction_ratio`` before the
    spatial attention to keep the affinity matrix small, then the result
    is upsampled back.  The upsample ends in a BatchNorm whose affine
    parameters are zero-initialised, so the module initially emits zeros
    — the caller adds the result as a residual.

    NOTE(review): the reshape in ``forward`` assumes H and W are
    divisible by ``reduction_ratio`` — confirm against the input sizes.
    """

    def __init__(self, C, reduction_ratio=3):
        super(TeacherSpatialAttention, self).__init__()
        self.C = C
        self.reduction_ratio = reduction_ratio
        # 1x1 projections: value (g), query (theta), key (phi).
        self.g = nn.Conv2d(self.C, self.C, kernel_size=1, stride=1, padding=0)
        self.theta = nn.Conv2d(self.C, self.C, kernel_size=1, stride=1, padding=0)
        self.phi = nn.Conv2d(self.C, self.C, kernel_size=1, stride=1, padding=0)

        self.downsample = nn.Sequential(
            nn.Conv2d(self.C, self.C, kernel_size=3,
                      stride=reduction_ratio,
                      padding=1,  # gives H // reduction_ratio when H is divisible
                      bias=False),
        )
        self.upsample = nn.Sequential(
            nn.ConvTranspose2d(self.C, self.C, kernel_size=3,
                               stride=reduction_ratio,
                               padding=1,
                               # Fix: was hard-coded to 2, which is only valid
                               # for reduction_ratio == 3 (output_padding must
                               # be < stride).  ratio - 1 restores the exact
                               # input size for any ratio and equals 2 for the
                               # default, matching StudentSpatialAttention.
                               output_padding=reduction_ratio - 1,
                               bias=False),
            nn.BatchNorm2d(C)
        )
        # Zero-init the BN affine so the block starts as a no-op residual.
        nn.init.constant_(self.upsample[1].weight, 0.0)
        nn.init.constant_(self.upsample[1].bias, 0.0)

    def forward(self, main_feat, aux_feat):
        """Return the spatial-attention response for ``main_feat``.

        Args:
            main_feat: main-modality features [B, C, H, W].
            aux_feat:  auxiliary-modality features [B, C, H, W].
        Returns:
            Attention response [B, C, H, W] (residual is added by the caller).
        """
        B, C, H, W = main_feat.size()

        # Reduce spatial resolution for both modalities: [B, C, H/r, W/r].
        main_reduced = self.downsample(main_feat)
        aux_reduced = self.downsample(aux_feat)

        # Q from the auxiliary modality; K and V from the main modality.
        Q = self.theta(aux_reduced).view(B, C, -1).permute(0, 2, 1)  # [B, HW/r^2, C]
        K = self.phi(main_reduced).view(B, C, -1)                    # [B, C, HW/r^2]
        V = self.g(main_reduced).view(B, C, -1).permute(0, 2, 1)     # [B, HW/r^2, C]

        # Scaled pixel-to-pixel affinity: [B, HW/r^2, HW/r^2].
        energy = torch.bmm(Q, K) / (C ** 0.5)
        attention = F.softmax(energy, dim=-1)
        # [B, HW/r^2, C]
        y = torch.bmm(attention, V)
        # Back to a feature map, then upsample to the original size.
        y = y.permute(0, 2, 1).view(B, C, H // self.reduction_ratio,
                                    W // self.reduction_ratio)
        return self.upsample(y)


class TeacherModule(nn.Module):
    """Two-stage cross-modal attention (channel, then spatial), applied
    symmetrically to both modalities with residual connections.

    Both attention submodules end in zero-initialised BatchNorms, so the
    module is initially equivalent to an identity mapping, which keeps
    early training stable.
    """

    def __init__(self, in_channels, channel_reduction_ratio=1, spatial_reduction_ratio=3):
        super(TeacherModule, self).__init__()
        self.channel_attn = TeacherChannelAttention(in_channels, channel_reduction_ratio)
        self.spatial_attn = TeacherSpatialAttention(in_channels, spatial_reduction_ratio)

    def forward(self, vis_feat, ir_feat):
        """
        Args:
            vis_feat: visible-light features [B, C, H, W].
            ir_feat:  infrared features [B, C, H, W].
        Returns:
            (enhanced_vis, enhanced_ir), each [B, C, H, W].
        """
        # Stage 1: cross-modal channel attention — each modality is the
        # main stream, with the other modality supplying the queries.
        vis_c = self.channel_attn(main_feat=vis_feat, aux_feat=ir_feat)
        ir_c = self.channel_attn(main_feat=ir_feat, aux_feat=vis_feat)

        # Stage 2: cross-modal spatial attention on the channel-refined maps.
        vis_cs = self.spatial_attn(main_feat=vis_c, aux_feat=ir_c)
        ir_cs = self.spatial_attn(main_feat=ir_c, aux_feat=vis_c)

        # Residual connections back onto the raw inputs.
        return vis_feat + vis_cs, ir_feat + ir_cs


class StudentChannelAttention(nn.Module):
    """Single-modality channel self-attention (student side).

    Same structure as the teacher's channel attention, but Q, K and V
    all come from the one input feature map.  The output projection ends
    in a zero-initialised BatchNorm, so the module initially emits zeros
    — the caller adds the result as a residual.
    """

    def __init__(self, C, reduction_ratio=1):
        super(StudentChannelAttention, self).__init__()
        self.C = C
        self.reduction_ratio = reduction_ratio
        reduced = C // reduction_ratio
        # 1x1 projections: value (g), query (theta), key (phi).
        self.g = nn.Conv2d(C, reduced, kernel_size=1, stride=1, padding=0)
        self.theta = nn.Conv2d(C, reduced, kernel_size=1, stride=1, padding=0)
        self.phi = nn.Conv2d(C, reduced, kernel_size=1, stride=1, padding=0)
        self.W = nn.Sequential(
            nn.Conv2d(reduced, C, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(C),
        )
        # Zero-init the BN affine so the block starts as a no-op residual.
        nn.init.constant_(self.W[1].weight, 0.0)
        nn.init.constant_(self.W[1].bias, 0.0)

    def forward(self, feat):
        """feat: [B, C, H, W] -> channel-attention response [B, C, H, W]."""
        B, _, H, W = feat.size()
        reduced = self.C // self.reduction_ratio
        q = self.theta(feat).view(B, reduced, -1)                   # [B, C/r, HW]
        k = self.phi(feat).view(B, reduced, -1).permute(0, 2, 1)    # [B, HW, C/r]
        v = self.g(feat).view(B, reduced, -1)                       # [B, C/r, HW]
        # Channel-to-channel affinity with scaled dot product.
        scores = torch.matmul(q, k) / (reduced ** 0.5)              # [B, C/r, C/r]
        weights = F.softmax(scores, dim=-1)
        out = torch.matmul(weights, v).view(B, reduced, H, W)
        return self.W(out)


class StudentSpatialAttention(nn.Module):
    """Single-modality spatial self-attention (student side).

    Features are downsampled by ``reduction_ratio`` before the attention
    and upsampled afterwards; kernel size and padding are derived from
    the ratio so the spatial size is restored exactly when H and W are
    divisible by it.  The zero-initialised BatchNorm makes the initial
    output zero — the caller adds the result as a residual.
    """

    def __init__(self, C, reduction_ratio=3):
        super(StudentSpatialAttention, self).__init__()
        self.C = C
        self.reduction_ratio = reduction_ratio
        # 1x1 projections: value (g), query (theta), key (phi).
        self.g = nn.Conv2d(C, C, kernel_size=1, stride=1, padding=0)
        self.theta = nn.Conv2d(C, C, kernel_size=1, stride=1, padding=0)
        self.phi = nn.Conv2d(C, C, kernel_size=1, stride=1, padding=0)

        # Kernel/padding chosen so output size is exactly H // ratio.
        ksize = 2 * reduction_ratio - 1
        pad = reduction_ratio - 1
        self.downsample = nn.Sequential(
            nn.Conv2d(C, C, kernel_size=ksize, stride=reduction_ratio,
                      padding=pad, bias=False),
        )
        self.upsample = nn.Sequential(
            nn.ConvTranspose2d(C, C, kernel_size=ksize,
                               stride=reduction_ratio,
                               padding=pad,
                               output_padding=reduction_ratio - 1, bias=False),
            nn.BatchNorm2d(C)
        )
        # Zero-init the BN affine so the block starts as a no-op residual.
        nn.init.constant_(self.upsample[1].weight, 0.0)
        nn.init.constant_(self.upsample[1].bias, 0.0)

    def forward(self, feat):
        """feat: [B, C, H, W] -> spatial-attention response [B, C, H, W]."""
        B, C, H, W = feat.size()
        reduced = self.downsample(feat)                              # [B, C, H/r, W/r]

        q = self.theta(reduced).view(B, C, -1).permute(0, 2, 1)      # [B, HW/r^2, C]
        k = self.phi(reduced).view(B, C, -1)                         # [B, C, HW/r^2]
        v = self.g(reduced).view(B, C, -1).permute(0, 2, 1)          # [B, HW/r^2, C]

        # Scaled pixel-to-pixel affinity: [B, HW/r^2, HW/r^2].
        scores = torch.bmm(q, k) / (C ** 0.5)
        weights = F.softmax(scores, dim=-1)
        y = torch.bmm(weights, v)                                    # [B, HW/r^2, C]
        # Back to a feature map, then upsample to the original size.
        y = y.permute(0, 2, 1).view(B, C, H // self.reduction_ratio,
                                    W // self.reduction_ratio)
        return self.upsample(y)


class StudentModule(nn.Module):
    """Two-stage single-modality attention (channel, then spatial) with a
    residual connection.

    Both attention submodules end in zero-initialised BatchNorms, so the
    module is initially equivalent to an identity mapping, keeping early
    training stable.
    """

    def __init__(self, in_channels, channel_reduction_ratio=1, spatial_reduction_ratio=3):
        super(StudentModule, self).__init__()
        self.channel_attn = StudentChannelAttention(in_channels, channel_reduction_ratio)
        self.spatial_attn = StudentSpatialAttention(in_channels, spatial_reduction_ratio)

    def forward(self, feat):
        # Channel attention first, spatial attention on its output,
        # then a residual back onto the raw input.
        refined = self.spatial_attn(self.channel_attn(feat))
        return feat + refined


class Teacher(nn.Module):
    """Stack of TeacherModules, one per backbone stage.

    SYSU gets a third stage (for layer3's 1024 channels); other datasets
    use only the first two.
    """

    def __init__(self, dataset, channel_reduction_ratio=1, spatial_reduction_ratio=3):
        super(Teacher, self).__init__()
        self.dataset = dataset
        self.TeacherModule1 = TeacherModule(256, channel_reduction_ratio, spatial_reduction_ratio)
        self.TeacherModule2 = TeacherModule(512, channel_reduction_ratio, spatial_reduction_ratio)
        if dataset == 'sysu':
            self.TeacherModule3 = TeacherModule(1024, channel_reduction_ratio, spatial_reduction_ratio)

    def forward(self, vis_feat, ir_feat):
        for stage in (self.TeacherModule1, self.TeacherModule2):
            vis_feat, ir_feat = stage(vis_feat, ir_feat)
        if self.dataset == 'sysu':
            vis_feat, ir_feat = self.TeacherModule3(vis_feat, ir_feat)
        return vis_feat, ir_feat


class Student(nn.Module):
    """Stack of StudentModules, one per backbone stage.

    SYSU gets a third stage (for layer3's 1024 channels); other datasets
    use only the first two.
    """

    def __init__(self, dataset, channel_reduction_ratio=1, spatial_reduction_ratio=3):
        super(Student, self).__init__()
        self.dataset = dataset
        self.StudentModule1 = StudentModule(256, channel_reduction_ratio, spatial_reduction_ratio)
        self.StudentModule2 = StudentModule(512, channel_reduction_ratio, spatial_reduction_ratio)
        if dataset == 'sysu':
            self.StudentModule3 = StudentModule(1024, channel_reduction_ratio, spatial_reduction_ratio)

    def forward(self, feat):
        for stage in (self.StudentModule1, self.StudentModule2):
            feat = stage(feat)
        if self.dataset == 'sysu':
            feat = self.StudentModule3(feat)
        return feat


class TSNet(nn.Module):
    """Teacher-student network for visible-infrared person re-ID.

    The teacher path uses cross-modal attention and therefore needs both
    modalities; the student path uses single-modality attention so that
    it can run on a single modality at test time (modal 1 or 2).  Both
    paths share the modality stems, the ResNet trunk, the DEE module,
    the BN bottleneck and the classifier.

    Args:
        class_num: number of identity classes for the classifier head.
        dataset: 'sysu' keeps layer3+layer4 (pool_dim 2048); 'regdb'
            stops after layer3 (pool_dim 1024).
        arch: backbone name forwarded to the stem/trunk wrappers.
    """

    def __init__(self, class_num, dataset, arch='resnet50'):
        super(TSNet, self).__init__()

        # Modality-specific stems and the shared ResNet trunk.
        self.thermal_module = thermal_module(arch=arch)
        self.visible_module = visible_module(arch=arch)
        self.base_resnet = base_resnet(arch=arch)

        self.dataset = dataset
        self.Teacher = Teacher(self.dataset)
        self.Student = Student(self.dataset)
        if self.dataset == 'regdb':  # For regdb dataset, we remove the MFA3 block and layer4.
            pool_dim = 1024
            self.DEE = DEE_module(512)
        else:
            pool_dim = 2048
            self.DEE = DEE_module(1024)

        # BNNeck: batch norm (no shift) between pooled features and classifier.
        self.bottleneck = nn.BatchNorm1d(pool_dim)
        self.bottleneck.bias.requires_grad_(False)  # no shift
        self.bottleneck.apply(weights_init_kaiming)
        self.classifier = nn.Linear(pool_dim, class_num, bias=False)
        self.classifier.apply(weights_init_classifier)

        self.l2norm = Normalize(2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

    def forward(self, vis, ir, modal=0):
        # vis: visible images [B,3,384,144]; ir: infrared images [B,3,384,144].
        # modal == 0: both modalities (training); 1: visible only; 2: infrared only.
        if modal == 0:
            # Modality-specific stems -> [B, 64, 96, 36].
            vis = self.visible_module(vis)
            ir = self.thermal_module(ir)

            # Stage 1: shared layer1, then teacher (cross-modal) and
            # student (single-modal) attention branches in parallel.
            vis = self.base_resnet.base.layer1(vis)
            ir = self.base_resnet.base.layer1(ir)
            t_vis, t_ir = self.Teacher.TeacherModule1(vis, ir)
            s_vis = self.Student.StudentModule1(vis)
            s_ir = self.Student.StudentModule1(ir)

            # Stage 2: shared layer2 + attention for both branches.
            t_vis = self.base_resnet.base.layer2(t_vis)
            t_ir = self.base_resnet.base.layer2(t_ir)
            t_vis, t_ir = self.Teacher.TeacherModule2(t_vis, t_ir)
            s_vis = self.base_resnet.base.layer2(s_vis)
            s_ir = self.base_resnet.base.layer2(s_ir)
            s_vis = self.Student.StudentModule2(s_vis)
            s_ir = self.Student.StudentModule2(s_ir)
            if self.dataset == 'sysu':
                # SYSU: layer3 + third attention stage, DEE (triples the
                # batch to 3B), then layer4.
                t_vis = self.base_resnet.base.layer3(t_vis)
                t_ir = self.base_resnet.base.layer3(t_ir)
                t_vis, t_ir = self.Teacher.TeacherModule3(t_vis, t_ir)
                s_vis = self.base_resnet.base.layer3(s_vis)
                s_ir = self.base_resnet.base.layer3(s_ir)
                s_vis = self.Student.StudentModule3(s_vis)
                s_ir = self.Student.StudentModule3(s_ir)

                t_vis = self.DEE(t_vis)
                t_ir = self.DEE(t_ir)
                s_vis = self.DEE(s_vis)
                s_ir = self.DEE(s_ir)

                t_vis = self.base_resnet.base.layer4(t_vis)
                t_ir = self.base_resnet.base.layer4(t_ir)
                s_vis = self.base_resnet.base.layer4(s_vis)
                s_ir = self.base_resnet.base.layer4(s_ir)
            else:
                # RegDB: DEE before layer3; layer4 is not used.
                t_vis = self.DEE(t_vis)
                t_ir = self.DEE(t_ir)
                s_vis = self.DEE(s_vis)
                s_ir = self.DEE(s_ir)

                t_vis = self.base_resnet.base.layer3(t_vis)
                t_ir = self.base_resnet.base.layer3(t_ir)
                s_vis = self.base_resnet.base.layer3(s_vis)
                s_ir = self.base_resnet.base.layer3(s_ir)

            # Global average pooling -> [3B, C, 1, 1] per stream.
            t_vis_p = self.avgpool(t_vis)
            t_ir_p = self.avgpool(t_ir)
            s_vis_p = self.avgpool(s_vis)
            s_ir_p = self.avgpool(s_ir)

            # Flatten to [3B, C].
            t_vis_pool = t_vis_p.view(t_vis_p.size(0), t_vis_p.size(1))
            t_ir_pool = t_ir_p.view(t_ir_p.size(0), t_ir_p.size(1))
            s_vis_pool = s_vis_p.view(s_vis_p.size(0), s_vis_p.size(1))
            s_ir_pool = s_ir_p.view(s_ir_p.size(0), s_ir_p.size(1))

            # BNNeck features; modalities are concatenated along the batch.
            t_vis_feat = self.bottleneck(t_vis_pool)
            t_ir_feat = self.bottleneck(t_ir_pool)
            t_feat = torch.cat((t_vis_feat, t_ir_feat), 0)
            s_vis_feat = self.bottleneck(s_vis_pool)
            s_ir_feat = self.bottleneck(s_ir_pool)
            s_feat = torch.cat((s_vis_feat, s_ir_feat), 0)
            if self.training:
                # Orthogonality loss over the two DEE-generated embeddings:
                # split the 3B batch into (original, embed1, embed2), stack
                # the two embeddings, and penalise the strict upper triangle
                # of their Gram matrix.
                t_vis_ps = t_vis_p.view(t_vis_p.size(0), t_vis_p.size(1), t_vis_p.size(2)).permute(0, 2, 1)
                t_vis_p1, t_vis_p2, t_vis_p3 = torch.chunk(t_vis_ps, 3, 0)
                t_vis_pss = torch.cat((t_vis_p2, t_vis_p3), 1)
                t_vis_loss_ort = torch.triu(torch.bmm(t_vis_pss, t_vis_pss.permute(0, 2, 1)), diagonal=1).sum() / (
                    t_vis_p.size(0))

                t_ir_ps = t_ir_p.view(t_ir_p.size(0), t_ir_p.size(1), t_ir_p.size(2)).permute(0, 2, 1)
                t_ir_p1, t_ir_p2, t_ir_p3 = torch.chunk(t_ir_ps, 3, 0)
                t_ir_ss = torch.cat((t_ir_p2, t_ir_p3), 1)
                t_ir_loss_ort = torch.triu(torch.bmm(t_ir_ss, t_ir_ss.permute(0, 2, 1)), diagonal=1).sum() / (
                    t_ir_p.size(0))

                t_loss_ort = t_vis_loss_ort + t_ir_loss_ort

                s_vis_ps = s_vis_p.view(s_vis_p.size(0), s_vis_p.size(1), s_vis_p.size(2)).permute(0, 2, 1)
                s_vis_p1, s_vis_p2, s_vis_p3 = torch.chunk(s_vis_ps, 3, 0)
                s_vis_pss = torch.cat((s_vis_p2, s_vis_p3), 1)
                s_vis_loss_ort = torch.triu(torch.bmm(s_vis_pss, s_vis_pss.permute(0, 2, 1)), diagonal=1).sum() / (
                    s_vis_p.size(0))

                s_ir_ps = s_ir_p.view(s_ir_p.size(0), s_ir_p.size(1), s_ir_p.size(2)).permute(0, 2, 1)
                s_ir_p1, s_ir_p2, s_ir_p3 = torch.chunk(s_ir_ps, 3, 0)
                s_ir_pss = torch.cat((s_ir_p2, s_ir_p3), 1)
                s_ir_loss_ort = torch.triu(torch.bmm(s_ir_pss, s_ir_pss.permute(0, 2, 1)), diagonal=1).sum() / (
                    s_ir_p.size(0))

                s_loss_ort = s_vis_loss_ort + s_ir_loss_ort

                loss_ort = t_loss_ort + s_loss_ort
                return t_vis_pool,t_ir_pool,s_vis_pool,s_ir_pool , self.classifier(t_feat), self.classifier(s_feat), loss_ort
            # NOTE(review): when modal == 0 and self.training is False,
            # control falls through here and the method returns None —
            # confirm this path is never exercised at evaluation time.

        elif modal == 1:
            # Visible-only inference: student branch only.
            vis = self.visible_module(vis)

            vis = self.base_resnet.base.layer1(vis)
            s_vis = self.Student.StudentModule1(vis)

            s_vis = self.base_resnet.base.layer2(s_vis)
            s_vis = self.Student.StudentModule2(s_vis)
            if self.dataset == 'sysu':
                s_vis = self.base_resnet.base.layer3(s_vis)
                s_vis = self.Student.StudentModule3(s_vis)

                s_vis = self.DEE(s_vis)

                s_vis = self.base_resnet.base.layer4(s_vis)
            else:
                s_vis = self.DEE(s_vis)

                s_vis = self.base_resnet.base.layer3(s_vis)

            # L2-normalised pooled and bottleneck features for retrieval.
            s_vis_p = self.avgpool(s_vis)
            s_vis_pool = s_vis_p.view(s_vis_p.size(0), s_vis_p.size(1))
            s_vis_feat = self.bottleneck(s_vis_pool)
            return self.l2norm(s_vis_pool), self.l2norm(s_vis_feat)
        elif modal == 2:
            # Infrared-only inference: student branch only.
            ir = self.thermal_module(ir)

            ir = self.base_resnet.base.layer1(ir)
            s_ir = self.Student.StudentModule1(ir)

            s_ir = self.base_resnet.base.layer2(s_ir)
            s_ir = self.Student.StudentModule2(s_ir)

            if self.dataset == 'sysu':
                s_ir = self.base_resnet.base.layer3(s_ir)
                s_ir = self.Student.StudentModule3(s_ir)

                s_ir = self.DEE(s_ir)
                s_ir = self.base_resnet.base.layer4(s_ir)
            else:
                s_ir = self.DEE(s_ir)
                s_ir = self.base_resnet.base.layer3(s_ir)

            # L2-normalised pooled and bottleneck features for retrieval.
            s_ir_p = self.avgpool(s_ir)
            s_ir_pool = s_ir_p.view(s_ir_p.size(0), s_ir_p.size(1))
            s_ir_feat = self.bottleneck(s_ir_pool)
            return self.l2norm(s_ir_pool), self.l2norm(s_ir_feat)


class CNL(nn.Module):
    """Channel-wise non-local block between a high-level map ``x_h`` and a
    low-level map ``x_l``.

    The output projection ends in a zero-initialised BatchNorm, so the
    block starts out as an identity mapping of ``x_h``.

    Args:
        high_dim: channels of the high-level map.
        low_dim: channels of the low-level map.
        flag: 0 when both maps share the spatial size; 1 when the
            low-level map has twice the spatial size (stride-2 key and
            output projections compensate).
    """

    def __init__(self, high_dim, low_dim, flag=0):
        super(CNL, self).__init__()
        self.high_dim = high_dim
        self.low_dim = low_dim
        # Value (g) and query (theta) projections.
        self.g = nn.Conv2d(low_dim, low_dim, kernel_size=1, stride=1, padding=0)
        self.theta = nn.Conv2d(high_dim, low_dim, kernel_size=1, stride=1, padding=0)
        if flag == 0:
            self.phi = nn.Conv2d(low_dim, low_dim, kernel_size=1, stride=1, padding=0)
            self.W = nn.Sequential(nn.Conv2d(low_dim, high_dim, kernel_size=1, stride=1, padding=0),
                                   nn.BatchNorm2d(high_dim), )
        else:
            # Stride-2 variants match the coarser high-level resolution.
            self.phi = nn.Conv2d(low_dim, low_dim, kernel_size=1, stride=2, padding=0)
            self.W = nn.Sequential(nn.Conv2d(low_dim, high_dim, kernel_size=1, stride=2, padding=0),
                                   nn.BatchNorm2d(high_dim), )
        # Zero-init the BN affine so the block starts as an identity residual.
        nn.init.constant_(self.W[1].weight, 0.0)
        nn.init.constant_(self.W[1].bias, 0.0)

    def forward(self, x_h, x_l):
        """x_h: [B, high_dim, H, W]; x_l: low-level features.  Returns
        x_h plus the projected attention response."""
        B = x_h.size(0)
        value = self.g(x_l).view(B, self.low_dim, -1)                    # [B, low, N_l]
        query = self.theta(x_h).view(B, self.low_dim, -1)                # [B, low, N_h]
        key = self.phi(x_l).view(B, self.low_dim, -1).permute(0, 2, 1)   # [B, N, low]
        # Channel affinity, normalised by its width (no softmax).
        energy = torch.matmul(query, key)
        attention = energy / energy.size(-1)                             # [B, low, low]
        # Weighted values, reshaped back onto x_l's spatial grid.
        y = torch.matmul(attention, value).view(B, self.low_dim, *x_l.size()[2:])
        # Project up to high_dim; zero-initialised BN makes this a residual.
        return self.W(y) + x_h


class PNL(nn.Module):
    """Pixel-wise (spatial) non-local block between a high-level map
    ``x_h`` and a low-level map ``x_l``.

    The output projection ends in a zero-initialised BatchNorm, so the
    block starts out as an identity mapping of ``x_h``.
    """

    def __init__(self, high_dim, low_dim, reduc_ratio=2):
        super(PNL, self).__init__()
        self.high_dim = high_dim
        self.low_dim = low_dim
        self.reduc_ratio = reduc_ratio
        reduced = low_dim // reduc_ratio
        # Value (g), query (theta) and key (phi) projections with channel
        # reduction.
        self.g = nn.Conv2d(low_dim, reduced, kernel_size=1, stride=1, padding=0)
        self.theta = nn.Conv2d(high_dim, reduced, kernel_size=1, stride=1, padding=0)
        self.phi = nn.Conv2d(low_dim, reduced, kernel_size=1, stride=1, padding=0)

        self.W = nn.Sequential(
            nn.Conv2d(reduced, high_dim, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(high_dim), )
        # Zero-init the BN affine so the block starts as an identity residual.
        nn.init.constant_(self.W[1].weight, 0.0)
        nn.init.constant_(self.W[1].bias, 0.0)

    def forward(self, x_h, x_l):
        """x_h: [B, high_dim, H, W]; x_l: [B, low_dim, H, W]."""
        B = x_h.size(0)
        # NOTE: the projections emit low_dim // reduc_ratio channels but
        # are reshaped with low_dim rows — the channel reduction is folded
        # into a shorter "spatial" axis (requires H*W divisible by
        # reduc_ratio).
        value = self.g(x_l).view(B, self.low_dim, -1).permute(0, 2, 1)
        query = self.theta(x_h).view(B, self.low_dim, -1).permute(0, 2, 1)
        key = self.phi(x_l).view(B, self.low_dim, -1)
        # Pixel affinity, normalised by its width (no softmax).
        energy = torch.matmul(query, key)
        attention = energy / energy.size(-1)
        # Weighted values, reshaped back onto x_h's spatial grid.
        y = torch.matmul(attention, value).permute(0, 2, 1).contiguous()
        y = y.view(B, self.low_dim // self.reduc_ratio, *x_h.size()[2:])
        # Project up to high_dim; zero-initialised BN makes this a residual.
        return self.W(y) + x_h


class MFA_block(nn.Module):
    """Multi-scale feature aggregation: a channel non-local block (CNL)
    followed by a pixel non-local block (PNL), fusing a high-level map
    with the lower-level map that preceded it."""

    def __init__(self, high_dim, low_dim, flag):
        super(MFA_block, self).__init__()
        self.CNL = CNL(high_dim, low_dim, flag)
        self.PNL = PNL(high_dim, low_dim)

    def forward(self, x, x0):
        # x: high-level features; x0: low-level features.
        return self.PNL(self.CNL(x, x0), x0)

class embed_net(nn.Module):
    """DEEN-style embedding network: MFA fusion blocks plus the DEE module.

    Both modalities pass through modality-specific stems, are concatenated
    along the batch axis, and share the ResNet trunk.  The DEE module
    triples the batch, so training-time outputs have 3x the input batch.

    Args:
        class_num: number of identity classes for the classifier head.
        dataset: 'regdb' removes MFA3/layer4 (pool_dim 1024); anything
            else keeps them (pool_dim 2048).
        arch: backbone name forwarded to the stem/trunk wrappers.
    """

    def __init__(self, class_num, dataset, arch='resnet50'):
        super(embed_net, self).__init__()

        self.thermal_module = thermal_module(arch=arch)
        self.visible_module = visible_module(arch=arch)
        self.base_resnet = base_resnet(arch=arch)

        self.dataset = dataset
        if self.dataset == 'regdb':  # For regdb dataset, we remove the MFA3 block and layer4.
            pool_dim = 1024
            self.DEE = DEE_module(512)
            self.MFA1 = MFA_block(256, 64, 0)
            self.MFA2 = MFA_block(512, 256, 1)
        else:
            pool_dim = 2048
            self.DEE = DEE_module(1024)
            self.MFA1 = MFA_block(256, 64, 0)
            self.MFA2 = MFA_block(512, 256, 1)
            self.MFA3 = MFA_block(1024, 512, 1)

        # BNNeck: batch norm (no shift) between pooled features and classifier.
        self.bottleneck = nn.BatchNorm1d(pool_dim)
        self.bottleneck.bias.requires_grad_(False)  # no shift
        self.bottleneck.apply(weights_init_kaiming)
        self.classifier = nn.Linear(pool_dim, class_num, bias=False)
        self.classifier.apply(weights_init_classifier)

        self.l2norm = Normalize(2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

    def forward(self, x1, x2, modal=0):
        # x1: visible images [B,3,384,144]; x2: infrared images [B,3,384,144].
        # modal == 0: both modalities; 1: visible only; 2: infrared only.
        # NOTE(review): any other `modal` value leaves `x` unbound (NameError).
        if modal == 0:
            # [B, 64, 96, 36]
            x1 = self.visible_module(x1)
            x2 = self.thermal_module(x2)
            # [2B, 64, 96, 36] — modalities stacked along the batch axis.
            x = torch.cat((x1, x2), 0)
        elif modal == 1:
            x = self.visible_module(x1)
        elif modal == 2:
            x = self.thermal_module(x2)
        # x_ carries the lower-level features fed into each MFA block.
        # [2B, 64, 96, 36]
        x_ = x
        # [2B, 256, 96, 36]
        x = self.base_resnet.base.layer1(x_)
        # [2B, 256, 96, 36]
        x_ = self.MFA1(x, x_)
        # [2B, 512, 48, 18]
        x = self.base_resnet.base.layer2(x_)
        # [2B, 512, 48, 18]
        x_ = self.MFA2(x, x_)
        if self.dataset == 'regdb':  # For regdb dataset, we remove the MFA3 block and layer4.
            # [6B,512,48,18] — DEE concatenates 3x along the batch axis.
            x_ = self.DEE(x_)
            # [6B,1024,24,12]
            x = self.base_resnet.base.layer3(x_)
        else:
            # [2B,1024,24,12]
            x = self.base_resnet.base.layer3(x_)
            # [2B,1024,24,12]
            x_ = self.MFA3(x, x_)
            # [6B,1024,24,12] — DEE concatenates 3x along the batch axis.
            x_ = self.DEE(x_)
            # [6B,2048,12,6]
            x = self.base_resnet.base.layer4(x_)
        # [6B,2048,1,1]
        xp = self.avgpool(x)
        # [6B,2048]
        x_pool = xp.view(xp.size(0), xp.size(1))
        # [6B,2048]
        feat = self.bottleneck(x_pool)

        if self.training:
            # [6B, 1, 2048]
            xps = xp.view(xp.size(0), xp.size(1), xp.size(2)).permute(0, 2, 1)
            # Split along dim 0 into the original batch and the two
            # DEE-generated embeddings, each [2B, 1, pool_dim].
            xp1, xp2, xp3 = torch.chunk(xps, 3, 0)
            # [2B, 2, pool_dim]
            xpss = torch.cat((xp2, xp3), 1)
            # Orthogonality loss: sum of the strict upper triangle
            # (diagonal=1) of the embeddings' Gram matrix, averaged over
            # the full (tripled) batch size.
            loss_ort = torch.triu(torch.bmm(xpss, xpss.permute(0, 2, 1)), diagonal=1).sum() / (xp.size(0))

            return x_pool, self.classifier(feat), loss_ort
        else:
            return self.l2norm(x_pool), self.l2norm(feat)

# class TeacherSpatialAttention(nn.Module):
#     def __init__(self, C, reduction_ratio=2):
#         super(TeacherSpatialAttention, self).__init__()
#         self.C = C
#         self.reduction_ratio = reduction_ratio
#         # V
#         self.g = nn.Conv2d(self.C, self.C // self.reduction_ratio, kernel_size=1, stride=1, padding=0)
#         # Q
#         self.theta = nn.Conv2d(self.C, self.C // self.reduction_ratio, kernel_size=1, stride=1, padding=0)
#         # K
#         self.phi = nn.Conv2d(self.C, self.C // self.reduction_ratio, kernel_size=1, stride=1, padding=0)
#
#         self.W = nn.Sequential(
#             nn.Conv2d(self.C // self.reduction_ratio, self.C, kernel_size=1, stride=1, padding=0),
#             nn.BatchNorm2d(self.C), )
#         nn.init.constant_(self.W[1].weight, 0.0)
#         nn.init.constant_(self.W[1].bias, 0.0)
#
#         # 输出融合
#         self.gamma = nn.Parameter(torch.zeros(1))
#
#     def forward(self, main_feat, aux_feat):
#         B, C, H, W = main_feat.size()
#         # Q [B,H*W//r,C]
#         Q=self.theta(aux_feat).view(B, self.C , -1).permute(0, 2, 1)
#         # K [B,C,H*W//r]
#         K=self.phi(main_feat).view(B, self.C , -1)
#         # V [B,H*W//r,C]
#         V=self.g(main_feat).view(B, self.C , -1).permute(0, 2, 1)
#         # [B,H*W//r,H*W//r]
#         energy=torch.matmul(Q, K) / ((H*W//self.reduction_ratio) ** 0.5)
#         attention=F.softmax(energy, dim=-1)
#         # [B,H*W//r,C]
#         y=torch.matmul(attention, V)
#         # [B,C,H*W//r]
#         y=y.permute(0, 2, 1).contiguous()
#         # [B,C//r,H,W]
#         y=y.view(B, self.C//self.reduction_ratio , H, W)
#         # [B,C,H,W]
#         y=self.W(y)
#         return main_feat+self.gamma*y