# from visualizer import get_local
import torch
import torch.nn as nn

import torch.nn.functional as F
from functools import partial
import warnings
# from visualizer import get_local
from re_spikingjelly.spikingjelly.clock_driven.neuron import MultiStepParametricLIFNode, MultiStepLIFNode
from ultralytics.utils import LOGGER
from ultralytics.utils.tal import TORCH_1_10, dist2bbox, make_anchors
import math
# __all__ = ('MS_GetT','MS_CancelT', 'MS_ConvBlock','MS_Block','MS_DownSampling',
#            'MS_StandardConv','SpikeSPPF','SpikeConv','MS_Concat','SpikeDetect'
#            ,'Ann_ConvBlock','Ann_DownSampling','Ann_StandardConv','Ann_SPPF','MS_C2f',
#            'Conv_1','BasicBlock_1','BasicBlock_2','Concat_res2','Sample','MS_FullConvBlock','MS_ConvBlock_resnet50','MS_AllConvBlock','MS_ConvBlock_res2net')
import swanlab
import torchvision.utils as vutils
import io
import matplotlib.pyplot as plt
import numpy as np
from ultralytics.nn.modules.conv import Conv
# from mmcv.ops import ModulatedDeformConv2dPack
from torchvision.ops import deform_conv2d

def draw_heatmap_grid(fmap_5d, tag="P3", step=0, nrow=8, max_channels=None):
    """Tile a [1, 1, C, H, W] feature map into a heat-map checkerboard.

    Optionally keeps only the first ``max_channels`` channels; the SwanLab
    upload at the bottom (currently disabled) would publish the result.

    Args:
        fmap_5d (Tensor): feature map shaped [1, 1, C, H, W].
        tag (str): label for the image.
        step (int): current step, informational only.
        nrow (int): tiles per grid row.
        max_channels (int or None): show at most this many leading channels;
            ``None`` shows all of them.
    """
    # Drop batch and time dims -> [C, H, W].
    feature = fmap_5d[0, 0]
    channel_count = feature.shape[0]

    if max_channels is not None:
        feature = feature[:max_channels]
        channel_count = min(channel_count, max_channels)

    # Min-max normalize into [0, 1]; epsilon guards against a constant map.
    feature = (feature - feature.min()) / (feature.max() - feature.min() + 1e-5)
    feature = feature.unsqueeze(1)  # [C, 1, H, W]: one grayscale image per channel

    # Tile every channel image into a single grayscale board.
    board = vutils.make_grid(feature, nrow=nrow, padding=2, normalize=False, pad_value=0)
    board = board.mean(0).cpu().numpy()  # collapse to one [H, W] plane

    # Colorize with 'jet' and convert to uint8 RGB.
    heatmap_rgb = (plt.get_cmap('jet')(board)[:, :, :3] * 255).astype(np.uint8)

    # SwanLab upload (disabled).
    # swan_img = swanlab.Image(heatmap_rgb, caption=f"{tag} Heatmap Grid ({channel_count} channels)", size=(2048, 2048))
    # swanlab.log({f"{tag}_heatmap_grid": swan_img})


decay = 0.25  # 0.25 # decay constants


class mem_update(nn.Module):
    """Iterative LIF-style membrane update emitting quantized multi-level spikes.

    Input and output are [T, ...] tensors. At each step the previous membrane
    (minus the emitted spike, detached) leaks by the module-level ``decay``
    factor before the new input is added.
    """

    def __init__(self, act=False):
        super(mem_update, self).__init__()
        self.act = act
        # Quantizer capping the spike level (swap class to change the max value).
        self.qtrick = MultiSpike4()

    def forward(self, x):
        steps = x.shape[0]
        out = torch.zeros_like(x)
        spike = torch.zeros_like(x[0]).to(x.device)
        prev_mem = 0
        for t in range(steps):
            # Soft reset: subtract the (detached) previous spike, then leak.
            mem = x[t] if t == 0 else (prev_mem - spike.detach()) * decay + x[t]
            spike = self.qtrick(mem)
            prev_mem = mem.clone()
            out[t] = spike
        return out


class MultiSpike8(nn.Module):
    """Multi-level spike clamped to [0, 8] with a rectangular surrogate gradient."""

    # Nested autograd.Function so the module stays deep-copyable
    # (sharing one pre-built function instance would prevent that).
    class quant8(torch.autograd.Function):

        @staticmethod
        def forward(ctx, input):
            ctx.save_for_backward(input)
            # Quantize: clamp to [0, 8], then round to the nearest integer.
            return input.clamp(min=0, max=8).round()

        @staticmethod
        def backward(ctx, grad_output):
            input, = ctx.saved_tensors
            # Straight-through estimator: gradients pass only inside [0, 8].
            inside = (input >= 0) & (input <= 8)
            return grad_output * inside.to(grad_output.dtype)

    def forward(self, x):
        return self.quant8.apply(x)


class MultiSpike4(nn.Module):
    """Multi-level spike clamped to [0, 4] with a rectangular surrogate gradient."""

    # Nested so each module instance can be deep-copied safely.
    class quant4(torch.autograd.Function):

        @staticmethod
        def forward(ctx, input):
            ctx.save_for_backward(input)
            # Round the value after clamping it into [0, 4].
            return torch.round(input.clamp(0, 4))

        @staticmethod
        def backward(ctx, grad_output):
            input, = ctx.saved_tensors
            # Zero the gradient wherever the input fell outside [0, 4].
            outside = (input < 0) | (input > 4)
            return grad_output.masked_fill(outside, 0)

    def forward(self, x):
        return self.quant4.apply(x)


class MultiSpike2(nn.Module):
    """Multi-level spike clamped to [0, 2] with a rectangular surrogate gradient."""

    # Nested autograd.Function keeps the enclosing module deep-copyable.
    class quant2(torch.autograd.Function):

        @staticmethod
        def forward(ctx, input):
            ctx.save_for_backward(input)
            # Clamp into [0, 2] and round to an integer level.
            return torch.clamp(input, 0, 2).round()

        @staticmethod
        def backward(ctx, grad_output):
            input, = ctx.saved_tensors
            # Pass gradients only where the input was within [0, 2].
            keep = (input >= 0) & (input <= 2)
            return torch.where(keep, grad_output, torch.zeros_like(grad_output))

    def forward(self, x):
        return self.quant2.apply(x)


class MultiSpike1(nn.Module):
    """Binary spike: values are clamped to [0, 1] and rounded, i.e. a hard threshold."""

    # Nested so the parent module survives copy.deepcopy.
    class quant1(torch.autograd.Function):

        @staticmethod
        def forward(ctx, input):
            ctx.save_for_backward(input)
            # 0/1 output: clamp into the unit interval, then round.
            return torch.round(torch.clamp(input, min=0, max=1))

        @staticmethod
        def backward(ctx, grad_output):
            input, = ctx.saved_tensors
            # Rectangular surrogate: block gradients outside [0, 1].
            grad_input = grad_output.clone()
            grad_input[(input < 0) | (input > 1)] = 0
            return grad_input

    def forward(self, x):
        return self.quant1.apply(x)


def autopad(k, p=None, d=1):
    """Compute 'same'-shape padding for kernel ``k`` with dilation ``d``.

    Args:
        k (int | list): kernel size(s).
        p (int | list | None): explicit padding; computed automatically when ``None``.
        d (int): dilation factor.

    Returns:
        int | list: padding that keeps the output spatially same-sized.
    """
    if d > 1:
        # Effective kernel size once dilation is applied.
        if isinstance(k, int):
            k = d * (k - 1) + 1
        else:
            k = [d * (v - 1) + 1 for v in k]
    if p is not None:
        return p
    # Half of the (effective) kernel, rounded down, gives 'same' padding.
    return k // 2 if isinstance(k, int) else [v // 2 for v in k]


@torch.jit.script
def jit_mul(x, y):
    """TorchScript-compiled element-wise product of two tensors."""
    return torch.mul(x, y)


@torch.jit.script
def jit_sum(x):
    """TorchScript-compiled sum over the last two (spatial) dims, keeping them as size 1."""
    return torch.sum(x, dim=[-1, -2], keepdim=True)


class SpikeDFL(nn.Module):
    """
    Integral module of Distribution Focal Loss (DFL).

    Proposed in Generalized Focal Loss https://ieeexplore.ieee.org/document/9792391

    Decodes a per-side distribution over ``c1`` bins into its expected value:
    softmax over the bin axis, then a frozen 1x1 convolution whose weights are
    the bin indices 0..c1-1 (a weighted sum with integer weights).
    """

    def __init__(self, c1=16):
        """Initialize a convolutional layer with a given number of input channels.

        Args:
            c1 (int): number of DFL bins per box side.
        """
        super().__init__()
        # Frozen 1x1 conv acting as the expectation over bins.
        self.conv = nn.Conv2d(c1, 1, 1, bias=False).requires_grad_(False)
        x = torch.arange(c1, dtype=torch.float)  # bin positions [0, 1, ..., c1-1]
        # Plain value copy: wrapping the init tensor in nn.Parameter was
        # redundant, since .data[:] only copies values into the existing weight.
        self.conv.weight.data[:] = x.view(1, c1, 1, 1)
        self.c1 = c1
        # NOTE(review): this LIF node is never used in forward(); it is kept so
        # the module structure / existing checkpoints remain compatible.
        self.lif = MultiStepLIFNode(detach_reset=True, v_reset=None, backend='torch')

    def forward(self, x):
        """Decode [B, 4*c1, A] bin distributions into [B, 4, A] expected offsets."""
        b, c, a = x.shape  # batch, channels, anchors
        # softmax over the bin axis, then the frozen conv takes the expectation.
        return self.conv(x.view(b, 4, self.c1, a).transpose(2, 1).softmax(1)).view(b, 4, a)


class SpikeDetect(nn.Module):
    """YOLOv8-style spiking detection head for object detection."""
    dynamic = False  # force anchor/grid reconstruction on every forward
    export = False  # export mode (ONNX / TensorRT / TFLite conversion etc.)
    shape = None  # cached input shape, used to decide when anchors must be rebuilt
    anchors = torch.empty(0)  # anchor points (filled lazily at inference)
    strides = torch.empty(0)  # per-level stride (downsampling ratio of each head)
    swanlab_idx = [x for x in range(0, 50)]   # forward-call indices selected for SwanLab visualization
    idx = 0  # forward-call counter (class-level)

    def __init__(self, nc=80, ch=()):
        """
        Initialize the SpikeDetect layer.

        Args:
            nc (int): number of classes.
            ch (tuple): input channel count of each feature level.
        """
        super().__init__()
        self.nc = nc  # number of classes
        self.nl = len(ch)  # number of detection levels (3 output maps: P3, P4, P5)
        self.reg_max = 16  # DFL (Distribution Focal Loss) bin count for box regression
        self.no = nc + self.reg_max * 4  # outputs per anchor: box distribution + class scores
        self.stride = torch.zeros(self.nl)  # per-level stride (actual values assigned at build time)

        # Box-regression branch: 3 convs per level, emitting 4 * reg_max channels
        # (a reg_max-bin probability distribution per box coordinate).
        c2 = max((16, ch[0] // 4, self.reg_max * 4))  # hidden width of the regression branch
        self.cv2 = nn.ModuleList(
            nn.Sequential(
                SpikeConv(x, c2, 3),
                SpikeConv(c2, c2, 3),
                SpikeConvWithoutBN(c2, 4 * self.reg_max, 1)) for x in ch)

        # Classification branch: 3 convs per level, emitting nc channels.
        c3 = max(ch[0], min(self.nc, 100))  # hidden width of the classification branch
        self.cv3 = nn.ModuleList(
            nn.Sequential(
                SpikeConv(x, c3, 3),
                SpikeConv(c3, c3, 3),
                SpikeConvWithoutBN(c3, self.nc, 1)) for x in ch)

        # DFL module: converts the bin distributions into box coordinates.
        self.dfl = SpikeDFL(self.reg_max) if self.reg_max > 1 else nn.Identity()

    def forward(self, x):
        """
        Forward pass.

        Args:
            x (list): 3 multi-scale feature maps (with a leading time dimension).

        Returns:
            Training: the raw (un-decoded) per-level maps.
            Inference: decoded boxes and class probabilities (plus raw maps
            unless exporting).
        """
        shape = x[0].mean(0).shape  # feature-map shape (BCHW); mean(0) drops the time dim before caching
        # for i in range(self.nl):
        #     print(f"Detect Layer {i} Input Shape: {x[i].shape}")  # log each level's input shape
        # SwanLab heat-map logging (disabled)
        # if not self.training:
        #     tags = ['P3', 'P4', 'P5']  # feature-map labels for visualization
        #     if self.idx in self.swanlab_idx:  # only on selected calls
        #         for i, (fmap, tag) in enumerate(zip(x, tags)):
        #             draw_heatmap_grid(fmap_5d=fmap, tag=tag, step=0, nrow=8, max_channels=64)

        self.idx += 1  # count forward calls, used by the SwanLab hooks above

        # Per level: run regression and classification branches, then merge.
        for i in range(self.nl):
            # Concatenate along dim=2, the channel axis of [T, B, C, H, W].
            x[i] = torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 2)
            x[i] = x[i].mean(0)  # average over the time dimension -> static map [B, C, H, W]
            # LOGGER.info(f"Detect Layer {i} Output Shape: {x[i].shape}")  # log each level's output shape

        if self.training:
            # LOGGER.info("Detect Layer in Training Mode: Returning raw feature maps.")
            # Training mode: return the un-decoded raw maps.
            return x

        elif self.dynamic or self.shape != shape:
            # Shape changed (or dynamic mode): rebuild anchors and strides.
            self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
            self.shape = shape  # refresh the shape cache

        # Concatenate all levels into one tensor: [B, no, total anchor count].
        x_cat = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2)

        # NOTE(review): self.format is not set in __init__; it appears to be
        # assigned externally by the exporter before export — confirm upstream.
        if self.export and self.format in ('saved_model', 'pb', 'tflite', 'edgetpu', 'tfjs'):
            # Export-specific handling: avoid ops some targets lack (e.g. TF Split).
            box = x_cat[:, :self.reg_max * 4]  # regression output
            cls = x_cat[:, self.reg_max * 4:]  # classification output
        else:
            # Normal inference: split box and class parts.
            box, cls = x_cat.split((self.reg_max * 4, self.nc), 1)  # first 4*reg_max dims, then nc

        # DFL-decode the distributions into boxes, scaled by stride to image coords.
        dbox = dist2bbox(self.dfl(box), self.anchors.unsqueeze(0), xywh=True, dim=1) * self.strides

        if self.export and self.format in ('tflite', 'edgetpu'):
            # TFLite/EdgeTPU export: normalize boxes to [0, 1] to limit quantization error.
            img_h = shape[2] * self.stride[0]
            img_w = shape[3] * self.stride[0]
            img_size = torch.tensor([img_w, img_h, img_w, img_h], device=dbox.device).reshape(1, 4, 1)
            dbox /= img_size  # normalize

        # Concatenate boxes and sigmoid class scores: [B, 4 + nc, anchors].
        y = torch.cat((dbox, cls.sigmoid()), 1)

        # Final result.
        return y if self.export else (y, x)  # y only when exporting; (y, x) otherwise

    def bias_init(self):
        """Initialize Detect() biases, WARNING: requires stride availability."""
        m = self  # self.model[-1]  # Detect() module
        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1
        # ncf = math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum())  # nominal class frequency
        for a, b, s in zip(m.cv2, m.cv3, m.stride):  # from
            a[-1].conv.bias.data[:] = 1.0  # box
            b[-1].conv.bias.data[:m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (.01 objects, 80 classes, 640 img)


class BNAndPadLayer(nn.Module):
    """BatchNorm2d followed by constant border padding.

    The border is filled with the value BN would produce for a zero input,
    so the padded ring looks like normalized zeros rather than raw zeros.
    """

    def __init__(
            self,
            pad_pixels,
            num_features,
            eps=1e-5,
            momentum=0.1,
            affine=True,
            track_running_stats=True,
    ):
        super(BNAndPadLayer, self).__init__()
        self.bn = nn.BatchNorm2d(
            num_features, eps, momentum, affine, track_running_stats
        )
        self.pad_pixels = pad_pixels

    def forward(self, input):
        out = self.bn(input)
        p = self.pad_pixels
        if p <= 0:
            return out
        # Value BN assigns to a zero activation:
        # bias - mean * gamma / sqrt(var + eps) (or the non-affine analogue).
        denom = torch.sqrt(self.bn.running_var + self.bn.eps)
        if self.bn.affine:
            fill = self.bn.bias.detach() - self.bn.running_mean * self.bn.weight.detach() / denom
        else:
            fill = -self.bn.running_mean / denom
        out = F.pad(out, [p] * 4)
        fill = fill.view(1, -1, 1, 1)
        # Overwrite the zero-padded ring with the BN-of-zero value.
        out[:, :, :p, :] = fill
        out[:, :, -p:, :] = fill
        out[:, :, :, :p] = fill
        out[:, :, :, -p:] = fill
        return out

    @property
    def weight(self):
        return self.bn.weight

    @property
    def bias(self):
        return self.bn.bias

    @property
    def running_mean(self):
        return self.bn.running_mean

    @property
    def running_var(self):
        return self.bn.running_var

    @property
    def eps(self):
        return self.bn.eps


class RepConv(nn.Module):
    """Re-parameterizable conv block: 1x1 -> BN(+pad) -> depthwise kxk -> 1x1 -> BN.

    The chain is intended to be folded into a single kxk convolution at
    deployment time (the usual structural re-parameterization trick).
    """

    def __init__(
            self,
            in_channel,
            out_channel,
            kernel_size=3,
            bias=False,
            group=1
    ):
        super().__init__()
        pad = (kernel_size - 1) // 2
        stem = nn.Conv2d(in_channel, in_channel, 1, 1, 0, bias=False, groups=group)
        # BN also writes its zero-value into the padded border.
        norm_pad = BNAndPadLayer(pad_pixels=pad, num_features=in_channel)
        tail = nn.Sequential(
            # Depthwise spatial conv; padding already handled by BNAndPadLayer.
            nn.Conv2d(in_channel, in_channel, kernel_size, 1, 0, groups=in_channel, bias=False),
            nn.Conv2d(in_channel, out_channel, 1, 1, 0, groups=group, bias=False),
            nn.BatchNorm2d(out_channel),
        )
        self.body = nn.Sequential(stem, norm_pad, tail)

    def forward(self, x):
        return self.body(x)


class SepRepConv(nn.Module):
    """Re-parameterizable separable conv: BN(+pad) -> pointwise 1x1 -> depthwise kxk."""

    def __init__(
            self,
            in_channel,
            out_channel,
            kernel_size=3,
            bias=False,
            group=1
    ):
        super().__init__()
        pad = (kernel_size - 1) // 2
        # BN pads the border with its zero-value so the depthwise conv needs no padding.
        norm_pad = BNAndPadLayer(pad_pixels=pad, num_features=in_channel)
        convs = nn.Sequential(
            nn.Conv2d(in_channel, out_channel, 1, 1, 0, groups=group, bias=False),  # pointwise (grouped)
            nn.Conv2d(out_channel, out_channel, kernel_size, 1, 0, groups=out_channel, bias=False),  # depthwise
        )
        self.body = nn.Sequential(norm_pad, convs)

    def forward(self, x):
        return self.body(x)


class SepAllConv(nn.Module):
    r"""
    Inverted separable convolution from MobileNetV2: https://arxiv.org/abs/1801.04381.

    Spiking variant on [T, B, C, H, W] tensors: each conv is preceded by a
    multi-step LIF node and followed by BatchNorm. Structurally identical to
    ``SepConv``; kept as a separate class.
    """

    def __init__(self,
                 dim,
                 expansion_ratio=2,
                 act2_layer=nn.Identity,
                 bias=False,
                 kernel_size=3,  # 7,3
                 padding=1):
        super().__init__()
        padding = int((kernel_size - 1) / 2)
        med_channels = int(expansion_ratio * dim)
        # Expand (pointwise) -> depthwise spatial -> project (re-param conv).
        self.pwconv1 = nn.Conv2d(dim, med_channels, kernel_size=1, stride=1, bias=bias)
        self.dwconv2 = nn.Conv2d(
            med_channels, med_channels, kernel_size=kernel_size,
            padding=padding, groups=med_channels, bias=bias)  # depthwise conv
        self.pwconv3 = SepRepConv(med_channels, dim)  # projection as a re-parameterizable conv

        self.bn1 = nn.BatchNorm2d(med_channels)
        self.bn2 = nn.BatchNorm2d(med_channels)
        self.bn3 = nn.BatchNorm2d(dim)

        self.lif1 = MultiStepLIFNode(detach_reset=True, v_reset=None, backend='torch')
        self.lif2 = MultiStepLIFNode(detach_reset=True, v_reset=None, backend='torch')
        self.lif3 = MultiStepLIFNode(detach_reset=True, v_reset=None, backend='torch')

    def forward(self, x):
        T, B, _, H, W = x.shape
        # Stage 1: spike -> pointwise expand -> BN.
        y = self.lif1(x)
        y = self.bn1(self.pwconv1(y.flatten(0, 1))).reshape(T, B, -1, H, W)
        # Stage 2: spike -> depthwise conv -> BN.
        y = self.lif2(y)
        y = self.bn2(self.dwconv2(y.flatten(0, 1))).reshape(T, B, -1, H, W)
        # Stage 3: spike -> re-param projection -> BN.
        y = self.lif3(y)
        y = self.bn3(self.pwconv3(y.flatten(0, 1))).reshape(T, B, -1, H, W)
        return y


class SepConv(nn.Module):
    r"""
    Inverted separable convolution from MobileNetV2: https://arxiv.org/abs/1801.04381.

    Spiking variant on [T, B, C, H, W] tensors: every conv is preceded by a
    multi-step LIF node and followed by BatchNorm.
    """

    def __init__(self,
                 dim,
                 expansion_ratio=2,
                 act2_layer=nn.Identity,
                 bias=False,
                 kernel_size=3,  # 7,3
                 padding=1):
        super().__init__()
        padding = int((kernel_size - 1) / 2)
        med_channels = int(expansion_ratio * dim)
        # Expand (pointwise) -> depthwise spatial -> project (re-param conv).
        self.pwconv1 = nn.Conv2d(dim, med_channels, kernel_size=1, stride=1, bias=bias)
        self.dwconv2 = nn.Conv2d(
            med_channels, med_channels, kernel_size=kernel_size,
            padding=padding, groups=med_channels, bias=bias)  # depthwise conv
        self.pwconv3 = SepRepConv(med_channels, dim)  # projection as a re-parameterizable conv

        self.bn1 = nn.BatchNorm2d(med_channels)
        self.bn2 = nn.BatchNorm2d(med_channels)
        self.bn3 = nn.BatchNorm2d(dim)

        self.lif1 = MultiStepLIFNode(detach_reset=True, v_reset=None, backend='torch')
        self.lif2 = MultiStepLIFNode(detach_reset=True, v_reset=None, backend='torch')
        self.lif3 = MultiStepLIFNode(detach_reset=True, v_reset=None, backend='torch')

    def forward(self, x):
        T, B, _, H, W = x.shape

        def stage(lif, conv, bn, t):
            # spike -> conv over flattened time -> norm -> restore the time dim
            return bn(conv(lif(t).flatten(0, 1))).reshape(T, B, -1, H, W)

        x = stage(self.lif1, self.pwconv1, self.bn1, x)
        x = stage(self.lif2, self.dwconv2, self.bn2, x)
        x = stage(self.lif3, self.pwconv3, self.bn3, x)
        return x


class MS_ConvBlock(nn.Module):
    """Spiking conv block: a SepConv residual followed by a conv-mixer residual.

    Args:
        input_dim: channel count (preserved by the block).
        mlp_ratio: hidden expansion ratio of the RepConv pair.
        sep_kernel_size: depthwise kernel size inside SepConv.
        full: stored only; kept for interface compatibility.
    """

    def __init__(self, input_dim, mlp_ratio=4., sep_kernel_size=7, full=False):
        super().__init__()

        self.full = full
        self.Conv = SepConv(dim=input_dim, kernel_size=sep_kernel_size)
        self.mlp_ratio = mlp_ratio

        self.lif1 = MultiStepLIFNode(detach_reset=True, v_reset=None, backend='torch')
        self.lif2 = MultiStepLIFNode(detach_reset=True, v_reset=None, backend='torch')

        hidden = int(input_dim * mlp_ratio)
        self.conv1 = RepConv(input_dim, hidden)  # expand
        self.bn1 = nn.BatchNorm2d(hidden)
        self.conv2 = RepConv(hidden, input_dim)  # project back
        self.bn2 = nn.BatchNorm2d(input_dim)

    def forward(self, x):
        T, B, C, H, W = x.shape
        # Residual 1: separable conv (pw + dw + pw).
        x = self.Conv(x) + x

        shortcut = x
        # Residual 2: LIF -> RepConv -> BN, expanding then projecting back.
        x = self.bn1(self.conv1(self.lif1(x).flatten(0, 1))).reshape(T, B, int(self.mlp_ratio * C), H, W)
        x = self.bn2(self.conv2(self.lif2(x).flatten(0, 1))).reshape(T, B, C, H, W)
        return shortcut + x


class MS_AllConvBlock(nn.Module):  # standard conv
    """Spiking block built from standard convs: SepConv residual + 3x3 pair residual.

    Args:
        input_dim: channel count (preserved).
        mlp_ratio: hidden expansion ratio of the 3x3 conv pair.
        sep_kernel_size: depthwise kernel size inside SepConv.
        group: accepted for interface compatibility; unused here.
    """

    def __init__(self, input_dim, mlp_ratio=4., sep_kernel_size=7, group=False):
        super().__init__()

        self.Conv = SepConv(dim=input_dim, kernel_size=sep_kernel_size)

        self.mlp_ratio = mlp_ratio
        hidden = int(input_dim * mlp_ratio)
        self.conv1 = MS_StandardConv(input_dim, hidden, 3)  # expand
        self.conv2 = MS_StandardConv(hidden, input_dim, 3)  # project back

    def forward(self, x):
        T, B, C, H, W = x.shape  # unpack also asserts the 5-D layout
        # Residual 1: separable conv (pw + dw + pw).
        x = self.Conv(x) + x
        # Residual 2: expand/contract 3x3 conv pair.
        return x + self.conv2(self.conv1(x))
    

class LIF(nn.Module):
    """Small spiking encoder that downsamples by 8x (e.g. 640 -> 80) via three pools.

    Accepts [B, C, H, W] or [T, B, C, H, W]; a missing time dimension is added
    as T=1 and kept on the output.
    """

    def __init__(self):
        super().__init__()
        # Conv stack mimicking a backbone stem: 3 -> 32 -> 64 -> 64 -> 1 channels.
        self.block1 = MS_StandardConv(3, 32, k=3, p=1)
        self.block2 = MS_StandardConv(32, 64, k=3, p=1)
        self.block3 = MS_StandardConv(64, 64, k=3, p=1)
        self.block4 = MS_StandardConv(64, 1, k=1, p=0)

        self.lif = MultiStepLIFNode(detach_reset=True, v_reset=None, backend='torch')
        self.pool = nn.AvgPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        if x.dim() == 4:
            x = x.unsqueeze(0)  # add a singleton time dimension
        T, B, C, H, W = x.shape
        x = self.lif(x)  # spike activation over time
        # Each stage: time-aware conv, then a 2x2 average pool over flattened time.
        for i, block in enumerate((self.block1, self.block2, self.block3), start=1):
            scale = 2 ** i
            x = self.pool(block(x).flatten(0, 1)).reshape(T, B, -1, H // scale, W // scale)
        # Final 1x1 projection to a single channel; time dimension is kept.
        return self.block4(x)


class LIFAdd(nn.Module):
    """Weighted fusion of RGB and IR feature maps driven by a weight map.

    ``forward`` receives ``[x_ir, x_rgb, weight]``. The weight map is affinely
    rescaled, capped, pooled to the current pyramid level and used as a
    per-pixel mixing coefficient (rgb share = w, ir share = 1 - w).
    """

    def __init__(self, layer):
        super(LIFAdd, self).__init__()
        self.layer = layer  # pyramid level: 3 (full res), 4 (/2 pool) or 5 (/4 pool)
        self.pool_layer4 = nn.AvgPool2d(kernel_size=2, stride=2)
        self.pool_layer5 = nn.AvgPool2d(kernel_size=4, stride=4)
        self.beta = 0.4  # strength of the weight-map contribution

    def forward(self, x):
        ir, rgb, raw_w = x[0], x[1], x[2]
        T, B, C, H, W = ir.shape

        # Affine rescale, flatten time into batch, then cap at 0.5.
        w = torch.clamp(((raw_w - 0.31) / 0.63).flatten(0, 1), max=0.5)
        w = self.beta * w + 0.5

        # Match the weight map's resolution to this pyramid level.
        if self.layer == 4:
            w = self.pool_layer4(w)
        elif self.layer == 5:
            w = self.pool_layer5(w)
        # layer == 3 keeps the map at full resolution.

        w = w.reshape(T, B, 1, H, W)
        # Convex-style mix: rgb weighted by w, ir by its complement.
        return w * rgb + (1 - w) * ir

class Multiin(nn.Module):  # stereo attention block
    """Select one half of a stacked 6-channel input: first 3 or last 3 channels."""

    def __init__(self, out=1):
        super().__init__()
        self.out = out  # 1 -> first three channels, anything else -> last three

    def forward(self, x):
        first_half, second_half = x[:, :, :3, :, :], x[:, :, 3:, :, :]
        return first_half if self.out == 1 else second_half


    
class ChannelPool(nn.Module):
    """Compress the channel axis into 2 maps: channel-wise max and channel-wise mean."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        max_map = torch.max(x, 1)[0].unsqueeze(1)
        mean_map = torch.mean(x, 1).unsqueeze(1)
        return torch.cat((max_map, mean_map), dim=1)
    
class SpatialGate(nn.Module):
    """Spatial attention: pool channels to 2 maps, 7x7 conv + sigmoid, rescale input."""

    def __init__(self):
        super().__init__()
        k = 7
        self.spatial = Conv(2, 1, k, 1, (k - 1) // 2, act=nn.Sigmoid())
        self.compress = ChannelPool()

    def forward(self, x):
        attention = self.spatial(self.compress(x))  # [B, 1, H, W] gate in (0, 1)
        return x * attention

class SELayer(nn.Module):
    """Squeeze-and-Excitation channel attention (https://arxiv.org/abs/1709.01507)."""

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x):
        b, c = x.size(0), x.size(1)
        # Squeeze: global average -> [b, c]; excite: bottleneck MLP -> per-channel gates.
        gates = self.fc(self.avg_pool(x).view(b, c)).view(b, c, 1, 1)
        return x * gates.expand_as(x)
    

class ModulatedDeformConv2dPack(nn.Module):
    """Modulated deformable convolution (DCNv2) built on torchvision's deform_conv2d.

    A companion conv predicts per-location offsets and a modulation mask;
    intended as a drop-in replacement for mmcv's ModulatedDeformConv2dPack.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
                 padding=1, dilation=1, deform_groups=1, bias=True):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = (kernel_size, kernel_size) if isinstance(kernel_size, int) else kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.deform_groups = deform_groups

        # Predicts 3*k*k channels per deform group: 2*k*k offsets + k*k mask logits.
        self.conv_offset_mask = nn.Conv2d(
            in_channels,
            self.deform_groups * 3 * self.kernel_size[0] * self.kernel_size[1],
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            bias=True
        )

        # Main deform-conv weight & optional bias.
        self.weight = nn.Parameter(
            torch.empty(out_channels, in_channels, *self.kernel_size)
        )
        self.bias = nn.Parameter(torch.zeros(out_channels)) if bias else None

        # Fix: the original used torch.randn for the weight (no fan-in scaling)
        # and left the offset/mask conv randomly initialized. Use the standard
        # nn.Conv2d kaiming-uniform init for the weight, and zero-init the
        # offset/mask branch so the layer starts as a plain conv (offsets 0,
        # mask = sigmoid(0) = 0.5), matching mmcv's init_weights().
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        nn.init.zeros_(self.conv_offset_mask.weight)
        nn.init.zeros_(self.conv_offset_mask.bias)

    def forward(self, x):
        """Predict offsets/mask from x, then apply the modulated deformable conv."""
        out = self.conv_offset_mask(x)
        # Split offsets (2*k*k per group) from mask logits (k*k per group).
        o_channels = 2 * self.kernel_size[0] * self.kernel_size[1] * self.deform_groups
        offset = out[:, :o_channels, :, :]
        mask = out[:, o_channels:, :, :].sigmoid()  # modulation in (0, 1)

        return deform_conv2d(
            input=x,
            offset=offset,
            weight=self.weight,
            bias=self.bias,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            mask=mask
        )
    
class ConvLIF(nn.Module):
    """Spiking conv layer: LIF spikes, then Conv2d + BatchNorm over flattened time.

    Interface mirrors the standard Conv block:
    (ch_in, ch_out, kernel, stride, padding, groups, dilation, activation flag).
    """

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
        """Build the conv/BN/LIF submodules; ``act`` is accepted for interface parity."""
        super().__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        self.lif = MultiStepLIFNode(detach_reset=True, v_reset=None, backend='torch')

    def forward(self, x):
        """Spike -> conv -> BN on a [T, B, C, H, W] tensor, preserving T and B."""
        T, B, _, H, W = x.shape
        spikes = self.lif(x).flatten(0, 1)  # merge time into batch for the 2-D conv
        return self.bn(self.conv(spikes)).reshape(T, B, -1, H, W)

class TargetAwareFusion(nn.Module):
    """Target-aware fusion of RGB and LWIR (thermal) feature streams.

    Both inputs are time-aware tensors [T, B, C, H, W]. The forward pass:
    per-frame modulated deformable conv on each modality, LIF spiking,
    1x1 fusion conv, depthwise ConvLIF + SE channel attention, a mask head
    predicting a spatial foreground mask, cosine-similarity channel gating
    driven by that mask, and a final spatial attention. nan_to_num/clamp
    guards throughout keep activations finite during training.
    """

    def __init__(self, c, layer_idx):
        # c: per-stream channel count; layer_idx: index used to name the sub-modules.
        super(TargetAwareFusion, self).__init__()
        self.layer_idx = layer_idx
        self.cos = torch.nn.functional.cosine_similarity  # NOTE(review): assigned but not used in forward
        self.sigmoid = torch.sigmoid  # NOTE(review): assigned but not used in forward
        self.eps_norm = 1e-6  # safe norm eps
        # layers (keeping the original structure)
        self.add_module(f'rgb_deformable_layer_{layer_idx}',
                        ModulatedDeformConv2dPack(c, c, kernel_size=3, padding=1))
        self.add_module(f'lwir_deformable_layer_{layer_idx}',
                        ModulatedDeformConv2dPack(c, c, kernel_size=3, padding=1))
        # LIF for each branch (time-aware)
        self.lif1 = MultiStepLIFNode(detach_reset=True, v_reset=0.0, backend='torch')
        self.lif2 = MultiStepLIFNode(detach_reset=True, v_reset=0.0, backend='torch')

        # fusion conv operates on flattened (T*B, 2C, H, W)
        self.add_module(f'fusion_conv_{layer_idx}', nn.Conv2d(2*c, c, kernel_size=1, bias=False))
        self.add_module(f'fusion_bn_{layer_idx}', nn.BatchNorm2d(c))
        self.add_module(f'fusion_act_{layer_idx}', nn.ReLU(inplace=True))

        # keep ConvLIF (expects time dimension) for dsc
        self.add_module(f'dsc_layer_{layer_idx}', ConvLIF(c, c, 1, g=c))
        self.add_module(f'se_layer_{layer_idx}', SELayer(c))

        # mask layer: ConvLIF sequence (time-aware)
        self.add_module(f'mask_layer_{layer_idx}',
                        nn.Sequential(
                            *[ConvLIF(c, 64, 3),
                              ConvLIF(64, 64, 3)]))
        # post-mask conv (2D)
        self.conv = Conv(64, 1, act=False)

        # MLP gating with LIF variants (time-aware)
        self.add_module(f'mlp_layer_{layer_idx}',
                        nn.Sequential(*[LIF_MLPConv(c, c//16, 1),
                                        LIF_MLPConv(c//16, c, 1)]))
        self.add_module(f'spatial_layer_{layer_idx}', SpatialGate())

        # thresholds for safety
        self.ACT_CLAMP = 1e3      # activation magnitude cap
        self.LOGIT_CLAMP = 20.0   # mask-logit cap, keeps sigmoid well-conditioned
        self.SLOGIT_CLAMP = 10.0  # channel-gate logit cap

    def safe_l2_normalize(self, x, dim, eps=None):
        """L2-normalize `x` along `dim`, clamping the norm to at least `eps`
        (defaults to self.eps_norm) to avoid division by zero.
        Non-tensor inputs are returned unchanged."""
        if eps is None:
            eps = self.eps_norm
        # ensure tensor
        if not torch.is_tensor(x):
            return x
        denom = torch.norm(x, dim=dim, keepdim=True).clamp(min=eps)
        return x / denom

    def forward(self, rgb_x, lwir_x):
        """
        Fuse the two modality streams with mask-driven channel gating.

        Inputs:
            rgb_x, lwir_x: [T, B, C, H, W]
        Returns:
            tmp_fused_res: [T, B, C, H, W] fused, gated features
            pred_mask_logits: [T*B, 1, H, W]  (2D conv output after flatten)
            s_logits: [T*B, C, 1, 1] channel-gate logits (pre-sigmoid)
        """
        T, B, C, H, W = rgb_x.shape
        N = T * B  # flattened batch-time

        # 1) deformable conv per-frame: produce [N, C, H, W]
        rgb_flat = rgb_x.flatten(0, 1)   # [N, C, H, W]
        lwir_flat = lwir_x.flatten(0, 1) # [N, C, H, W]
        tmp_rx = getattr(self, f'rgb_deformable_layer_{self.layer_idx}')(rgb_flat)
        tmp_lx = getattr(self, f'lwir_deformable_layer_{self.layer_idx}')(lwir_flat)

        # 2) reshape back to time-aware format for LIF (ConvLIF expects [T, B, C, H, W])
        tmp_rx = tmp_rx.reshape(T, B, C, H, W)
        tmp_lx = tmp_lx.reshape(T, B, C, H, W)

        # 3) LIF spiking update (time-aware)
        tmp_rx = self.lif1(tmp_rx)
        tmp_lx = self.lif2(tmp_lx)

        # 4) flatten time again for 2D fusion conv
        tmp_rx_flat = tmp_rx.flatten(0, 1)  # [N, C, H, W]
        tmp_lx_flat = tmp_lx.flatten(0, 1)  # [N, C, H, W]

        # Concatenate along channel and fuse: [N, 2C, H, W] -> [N, C, H, W]
        fused = torch.cat([tmp_rx_flat, tmp_lx_flat], dim=1)
        fused = getattr(self, f'fusion_conv_{self.layer_idx}')(fused)
        fused = getattr(self, f'fusion_bn_{self.layer_idx}')(fused)
        fused = getattr(self, f'fusion_act_{self.layer_idx}')(fused)

        # safety: clip/nan guard after fusion conv
        fused = torch.nan_to_num(fused, nan=0.0, posinf=1e4, neginf=-1e4)
        fused = fused.clamp(min=-self.ACT_CLAMP, max=self.ACT_CLAMP)

        # 5) reshape to time-aware for ConvLIF dsc layer: [N, C, H, W] -> [T, B, C, H, W]
        fused_tb = fused.reshape(T, B, C, H, W)
        fused_tb = getattr(self, f'dsc_layer_{self.layer_idx}')(fused_tb)  # ConvLIF -> still [T,B,C,H,W]

        # safety: convert to [N, C, H, W] for SE (SE operates per sample)
        fused_se_in = fused_tb.flatten(0, 1)  # [N, C, H, W]

        # 6) SELayer (expects [N, C, H, W] then does GAP)
        # SELayer uses AdaptiveAvgPool2d and Linear; protect values before it
        fused_se_in = torch.nan_to_num(fused_se_in, nan=0.0, posinf=1e4, neginf=-1e4)
        fused_se_in = fused_se_in.clamp(min=-self.ACT_CLAMP, max=self.ACT_CLAMP)
        fused_se = getattr(self, f'se_layer_{self.layer_idx}')(fused_se_in)  # [N, C, H, W]

        # 7) more safety checks & reshape back
        if torch.isnan(fused_se).any():
            print(f"[Warning] NaN after SE at layer {self.layer_idx}")
        if (fused_se.abs() > self.ACT_CLAMP).any():
            print(f"[Warning] large values after SE at layer {self.layer_idx}: max {fused_se.abs().max().item():.3e}")

        fused_tb = fused_se.reshape(T, B, C, H, W)

        # 8) mask prediction: mask_layer is ConvLIF sequence (time-aware), returns [T,B,64,H,W] -> then conv -> [N,1,H,W]
        pred_mask = getattr(self, f'mask_layer_{self.layer_idx}')(fused_tb)  # time-aware
        pred_mask_flat_feats = pred_mask.flatten(0, 1)  # [N, 64, H, W]
        pred_mask_logits = self.conv(pred_mask_flat_feats)  # [N,1,H,W]

        # safety on mask logits
        pred_mask_logits = torch.nan_to_num(pred_mask_logits, nan=0.0, posinf=1e4, neginf=-1e4)
        pred_mask_logits = pred_mask_logits.clamp(min=-self.LOGIT_CLAMP, max=self.LOGIT_CLAMP)

        # 9) prepare a and b for cosine
        # a: [N,1,HW], b: [N,C,HW]
        a = torch.sigmoid(pred_mask_logits).reshape(N, 1, -1)   # [N,1,HW]
        b = fused_tb.flatten(0, 1).reshape(N, C, -1)           # [N,C,HW]

        # safety: check shapes and finite
        if not (a.shape[0] == b.shape[0] == N):
            raise RuntimeError(f"shape mismatch in gating: a {a.shape}, b {b.shape}, expected N={N}")
        a = torch.nan_to_num(a, nan=0.0, posinf=1e4, neginf=-1e4)
        b = torch.nan_to_num(b, nan=0.0, posinf=1e4, neginf=-1e4)

        # 10) expand a to C channels and safe-normalize along HW dim (dim=2)
        a_exp = a.expand(-1, C, -1)  # [N, C, HW]
        a_norm = self.safe_l2_normalize(a_exp, dim=2)
        b_norm = self.safe_l2_normalize(b, dim=2)

        # 11) cosine per-channel: (a_norm * b_norm).sum(dim=2) -> [N, C]
        v = (a_norm * b_norm).sum(dim=2)  # stable dot product
        if torch.isnan(v).any():
            print(f"[Warning] NaN in cosine v at layer {self.layer_idx}")

        # 12) reshape v to time-aware for mlp_layer which expects [T,B,C,1,1]
        v_tb = v.reshape(T, B, C, 1, 1)

        # 13) mlp gating (LIF_MLPConv expects time-aware); returns [T,B,C,1,1]
        s_logits_tb = getattr(self, f'mlp_layer_{self.layer_idx}')(v_tb)
        s_logits_flat = s_logits_tb.flatten(0, 1)  # [N, C, 1, 1]

        # safety on s_logits
        s_logits_flat = torch.nan_to_num(s_logits_flat, nan=0.0, posinf=1e4, neginf=-1e4)
        s_logits_flat = s_logits_flat.clamp(min=-self.SLOGIT_CLAMP, max=self.SLOGIT_CLAMP)

        # 14) gating: apply sigmoid(s_logits) per-channel to fused features
        gate = torch.sigmoid(s_logits_flat)  # [N, C, 1, 1]
        fused_flat = fused_se  # [N, C, H, W]  (we already have SE-applied flattened)
        fused_gated = fused_flat * gate  # broadcast multiply

        # 15) spatial attention (expects 2D batch [N, C, H, W])
        fused_gated = getattr(self, f'spatial_layer_{self.layer_idx}')(fused_gated)

        # 16) final reshape back to time-aware
        tmp_fused_res = fused_gated.reshape(T, B, C, H, W)

        # final safety
        tmp_fused_res = torch.nan_to_num(tmp_fused_res, nan=0.0, posinf=1e4, neginf=-1e4)
        tmp_fused_res = tmp_fused_res.clamp(min=-self.ACT_CLAMP, max=self.ACT_CLAMP)

        return tmp_fused_res, pred_mask_logits, s_logits_flat



class MS_StandardConv(nn.Module):
    """Spiking standard convolution: LIF -> Conv2d -> BatchNorm on [T, B, C, H, W] tensors."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1):
        """Args: in/out channels, kernel, stride, padding, groups, dilation."""
        super().__init__()
        self.c1 = c1
        self.c2 = c2
        self.s = s
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        self.lif = MultiStepLIFNode(detach_reset=True, v_reset=None, backend='torch')

    def forward(self, x):
        """Spike over the time dimension, fold T into the batch for conv+BN, unfold again."""
        T, B, _, H, W = x.shape

        # LIF keeps the [T, B, C, H, W] layout.
        spikes = self.lif(x)

        # Conv2d needs a 4-D batch, so merge time and batch.
        flat = spikes.contiguous().flatten(0, 1)  # [T*B, C, H, W]

        # Convolution + batch norm.
        feats = self.bn(self.conv(flat))  # [T*B, c2, H/s, W/s]

        # Restore the time dimension, accounting for the stride.
        return feats.reshape(T, B, self.c2, int(H / self.s), int(W / self.s))

class LIF_MLPConv(nn.Module):
    """LIF spiking followed by a bias-free Conv2d (no BatchNorm).

    Used as an MLP-style layer on time-aware tensors [T, B, C, H, W].
    """

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1):
        """Args: in/out channels, kernel, stride, padding, groups, dilation."""
        super().__init__()
        self.c1 = c1
        self.c2 = c2
        self.s = s
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
        self.lif = MultiStepLIFNode(detach_reset=True, v_reset=None, backend='torch')

    def forward(self, x):
        T, B, _, H, W = x.shape
        spiked = self.lif(x).flatten(0, 1)  # merge time into batch for the 2D conv
        out = self.conv(spiked)
        return out.reshape(T, B, self.c2, int(H / self.s), int(W / self.s))


class MS_DownSampling(nn.Module):
    """Spiking downsampling block: optional LIF, strided Conv2d, then BatchNorm.

    The first layer of a network skips the LIF step (its input is a raw image,
    not spikes). Input/output are time-aware tensors [T, B, C, H, W].
    """

    def __init__(self, in_channels=2, embed_dims=256, kernel_size=3, stride=2, padding=1, first_layer=True):
        super().__init__()
        self.in_channels = in_channels
        self.encode_conv = nn.Conv2d(in_channels, embed_dims, kernel_size=kernel_size, stride=stride, padding=padding)
        self.encode_bn = nn.BatchNorm2d(embed_dims)
        if not first_layer:
            # BUGFIX: this was stored as `self.lif` while forward() checked for
            # `self.encode_lif`, so the spiking step was silently never applied.
            self.encode_lif = MultiStepLIFNode(detach_reset=True, v_reset=None, backend='torch')
        # self.pool = nn.MaxPool2d(kernel_size=2)

    def forward(self, x):
        T, B, _, _, _ = x.shape

        # Spike the input on every layer except the first.
        if hasattr(self, "encode_lif"):
            x = self.encode_lif(x)

        # Fold time into batch for the strided 2D conv.
        x = self.encode_conv(x.flatten(0, 1))

        _, C, H, W = x.shape
        # BatchNorm, then restore the [T, B, C, H, W] layout.
        x = self.encode_bn(x).reshape(T, B, -1, H, W).contiguous()

        return x


class MS_GetT(nn.Module):
    """Repeat a static image along a new leading time dimension: [B,C,H,W] -> [T,B,C,H,W]."""

    swanlab_idx = list(range(50))  # evaluation steps at which channels are visualized
    idx = 0                        # running call counter (shadowed per-instance once incremented)

    def __init__(self, in_channels=1, out_channels=1, T=4):
        super().__init__()
        self.T = T
        self.in_channels = in_channels

    def forward(self, x):
        # Visualize only during evaluation and only for the first few calls.
        if not self.training and self.idx in self.swanlab_idx:
            self.log_each_channel(x, tag="input_image")
        self.idx += 1  # count calls for the visualization gate above

        return x.unsqueeze(0).repeat(self.T, 1, 1, 1, 1)

    def log_each_channel(self, x, tag="input_image"):
        """Split a [1, 3, H, W] image into per-channel grayscale images for SwanLab upload."""
        img = x[0]  # [3, H, W]
        channel_names = ['R', 'G', 'B']

        for i in range(3):
            ch = img[i].cpu().numpy()  # [H, W]

            # Normalize to [0, 255] and cast to uint8.
            ch = (ch - ch.min()) / (ch.max() - ch.min() + 1e-5) * 255
            ch = ch.astype(np.uint8)

            # Stack to 3 channels so swanlab.Image can render it as RGB.
            ch_rgb = np.stack([ch] * 3, axis=-1)  # [H, W, 3]

            # swan_img = swanlab.Image(ch_rgb, caption=f"{tag} - {channel_names[i]} Channel")
            # swanlab.log({f"{tag}_channel_{channel_names[i]}": swan_img})


class MS_CancelT(nn.Module):
    """Collapse the leading time dimension by averaging: [T,B,C,H,W] -> [B,C,H,W]."""

    def __init__(self, in_channels=1, out_channels=1, T=2):
        super().__init__()
        self.T = T

    def forward(self, x):
        return x.mean(dim=0)


class SpikeConv(nn.Module):
    """Spiking convolution block (LIF -> Conv2d -> BN) on time-aware [T, B, C, H, W] tensors."""

    default_act = nn.SiLU()  # default activation (kept for interface parity; not applied here)

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
        """Args: ch_in, ch_out, kernel, stride, padding, groups, dilation, activation."""
        super().__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
        self.lif = MultiStepLIFNode(detach_reset=True, v_reset=None, backend='torch')
        self.bn = nn.BatchNorm2d(c2)
        self.s = s

    def forward(self, x):
        T, B, _, H, W = x.shape
        out_h, out_w = int(H / self.s), int(W / self.s)
        spikes = self.lif(x).flatten(0, 1)   # [T*B, C, H, W]
        feats = self.bn(self.conv(spikes))   # [T*B, c2, out_h, out_w]
        return feats.reshape(T, B, -1, out_h, out_w)


class SpikeConvWithoutBN(nn.Module):
    """Spiking convolution (LIF -> Conv2d with bias, no BatchNorm) on [T, B, C, H, W] tensors."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
        """Args: ch_in, ch_out, kernel, stride, padding, groups, dilation, activation."""
        super().__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=True)
        self.lif = MultiStepLIFNode(detach_reset=True, v_reset=None, backend='torch')
        self.s = s

    def forward(self, x):
        T, B, _, H, W = x.shape
        spiked = self.lif(x).flatten(0, 1)  # fold time into batch for the 2D conv
        out = self.conv(spiked)
        return out.reshape(T, B, -1, int(H / self.s), int(W / self.s))


class SpikeSPPF(nn.Module):
    """Spatial Pyramid Pooling - Fast (SPPF) for spiking tensors [T, B, C, H, W].

    Three chained k x k max-pools reproduce SPP with k=(5, 9, 13) more cheaply.
    """

    def __init__(self, c1, c2, k=5):
        super().__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = SpikeConv(c1, c_, 1, 1)
        self.cv2 = SpikeConv(c_ * 4, c2, 1, 1)
        self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)

    def forward(self, x):
        x = self.cv1(x)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')  # suppress torch 1.9.0 max_pool2d() warning
            T, B, _, H, W = x.shape
            # Collect x plus three successively pooled versions of it.
            pyramid = [x]
            for _ in range(3):
                pooled = self.m(pyramid[-1].flatten(0, 1)).reshape(T, B, -1, H, W)
                pyramid.append(pooled)
            return self.cv2(torch.cat(pyramid, 2))


class MS_Concat(nn.Module):
    """Concatenate a list of tensors along a given dimension.

    5-D entries (time-aware [T, B, C, H, W]) are first averaged over the time
    dimension so that everything is 4-D before concatenation.
    """

    def __init__(self, dimension=1):
        """Args: dimension — dim along which to concatenate (default 1, channels)."""
        super().__init__()
        self.d = dimension

    def forward(self, x):
        """x: list of tensors (mix of 4-D and 5-D). Returns one concatenated tensor."""
        # BUGFIX: the original wrote back into the caller's list (x[i] = ...);
        # build a new list instead so the input is not mutated.
        flat = [t.mean(0) if t.dim() == 5 else t for t in x]
        return torch.cat(flat, self.d)
