# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR Transformer class.

Copy-paste from torch.nn.Transformer with modifications:
    * positional encodings are passed in MHattention
    * extra LN at the end of encoder is removed
    * decoder returns a stack of activations from all decoding layers
"""
import copy
from typing import Optional, List

import torch
import torch.nn.functional as F
from torch import nn, Tensor
from mamba_ssm import Mamba

# Reading/debug checkpoint: to follow model construction, read build_model -> build_transformer -> Transformer.
class Transformer(nn.Module):
    """DETR-style transformer.

    The encoder runs over flattened backbone features; the decoder attends a
    fixed set of learned object queries to the encoder memory.
    """

    def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
                 num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False,
                 return_intermediate_dec=False):
        # normalize_before selects pre-norm (LayerNorm before each sub-layer)
        # instead of the classic post-norm arrangement.
        super().__init__()

        encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
                                                dropout, activation, normalize_before)
        decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,
                                                dropout, activation, normalize_before)

        # Pre-norm encoders need one extra LayerNorm on the way out.
        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)

        decoder_norm = nn.LayerNorm(d_model)
        self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,
                                          return_intermediate=return_intermediate_dec)

        self._reset_parameters()

        self.d_model = d_model
        self.nhead = nhead

    def _reset_parameters(self):
        """Xavier-uniform init for every parameter tensor with rank > 1
        (weight matrices); rank-1 tensors such as biases keep their defaults."""
        for param in self.parameters():
            if param.dim() > 1:
                nn.init.xavier_uniform_(param)

    def forward(self, src, mask, query_embed, pos_embed):
        """Encode image features and decode the object queries.

        Args:
            src: backbone features, shape (N, C, H, W).
            mask: padding mask, shape (N, H, W) — marks pixels added when
                batching images to a common size.
            query_embed: learned query embeddings, shape (Q, C).
            pos_embed: positional encoding, shape (N, C, H, W).

        Returns:
            Tuple of decoder outputs, shape (num_layers, N, Q, C), and the
            encoder memory reshaped back to (N, C, H, W).
        """
        bs, c, h, w = src.shape
        # nn.MultiheadAttention expects sequence-first tensors: (H*W, N, C).
        seq = src.flatten(2).permute(2, 0, 1)
        pos_seq = pos_embed.flatten(2).permute(2, 0, 1)
        # Broadcast the query set over the batch: (Q, C) -> (Q, N, C).
        queries = query_embed.unsqueeze(1).repeat(1, bs, 1)
        pad_mask = mask.flatten(1)  # (N, H, W) -> (N, H*W)

        # Decoder input starts at zero; content is produced purely by attention.
        tgt = torch.zeros_like(queries)
        memory = self.encoder(seq, src_key_padding_mask=pad_mask, pos=pos_seq)
        hs = self.decoder(tgt, memory, memory_key_padding_mask=pad_mask,
                          pos=pos_seq, query_pos=queries)
        return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w)


class Mambaformer(nn.Module):
    """Transformer variant whose encoder layers use a Mamba block in place of
    the FFN and whose decoder layers mix attention with Mamba blocks."""

    def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
                 num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False,
                 return_intermediate_dec=False,
                 num_queries=100):
        super().__init__()

        encoder_layer = MambaFFNEncoderLayer(d_model, dim_feedforward,
                                             dropout, activation, normalize_before)
        decoder_layer = MambaTransFFNDecoderLayer(d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward,
                                                  dropout=dropout, activation=activation, normalize_before=normalize_before, num_queries=num_queries)

        # Pre-norm encoders need one extra LayerNorm on the way out.
        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
        decoder_norm = nn.LayerNorm(d_model)
        self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,
                                          return_intermediate=return_intermediate_dec)

        self._reset_parameters()

        self.d_model = d_model
        self.nhead = nhead

    def _reset_parameters(self):
        """Xavier-uniform init for every parameter tensor with rank > 1."""
        for param in self.parameters():
            if param.dim() > 1:
                nn.init.xavier_uniform_(param)

    def forward(self, src, mask, query_embed, pos_embed):
        """Same contract as Transformer.forward, except the decoder also
        receives the un-flattened mask so it can recover the 2-D map size."""
        bs, c, h, w = src.shape
        seq = src.flatten(2).permute(2, 0, 1)            # (N, C, H, W) -> (H*W, N, C)
        pos_seq = pos_embed.flatten(2).permute(2, 0, 1)  # same layout change
        queries = query_embed.unsqueeze(1).repeat(1, bs, 1)  # (Q, C) -> (Q, N, C)
        mask_2d = mask                                   # keep (N, H, W) for the decoder
        flat_mask = mask.flatten(1)                      # (N, H*W)

        tgt = torch.zeros_like(queries)
        # NOTE(review): no src_key_padding_mask is given to the encoder here
        # (the Mamba encoder layers ignore it) — confirm this is intentional.
        memory = self.encoder(seq, pos=pos_seq)
        hs = self.decoder(tgt, memory, memory_key_padding_mask=flat_mask,
                          pos=pos_seq, query_pos=queries, mask=mask_2d)
        return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w)


class TransformerEncoder(nn.Module):
    """Stack of N cloned encoder layers with an optional final LayerNorm."""

    def __init__(self, encoder_layer, num_layers, norm=None):
        super().__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, src,
                mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        """Feed `src` through every layer in order, then the optional norm."""
        out = src
        for enc in self.layers:
            out = enc(out, src_mask=mask,
                      src_key_padding_mask=src_key_padding_mask, pos=pos)
        return out if self.norm is None else self.norm(out)


class TransformerDecoder(nn.Module):
    """Stack of N cloned decoder layers.

    With return_intermediate=True, returns the (normed) output of every layer
    stacked along a new leading dimension; otherwise only the final output
    with a singleton leading dimension.
    """

    def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        self.return_intermediate = return_intermediate

    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None,
                mask: Optional[Tensor] = None):
        out = tgt
        per_layer = []  # normed output after each layer (return_intermediate only)

        for dec in self.layers:
            out = dec(out, memory, tgt_mask=tgt_mask,
                      memory_mask=memory_mask,
                      tgt_key_padding_mask=tgt_key_padding_mask,
                      memory_key_padding_mask=memory_key_padding_mask,
                      pos=pos, query_pos=query_pos, mask=mask)
            if self.return_intermediate:
                # NOTE(review): this assumes self.norm is not None whenever
                # return_intermediate is set — confirm against callers.
                per_layer.append(self.norm(out))

        if self.norm is not None:
            out = self.norm(out)
            if self.return_intermediate:
                per_layer[-1] = out  # swap in the final normed output

        if self.return_intermediate:
            return torch.stack(per_layer)

        return out.unsqueeze(0)


class TransformerEncoderLayer(nn.Module):
    """Single encoder layer: multi-head self-attention + FFN, selectable
    pre-norm or post-norm arrangement."""

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Position-wise feed-forward network.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        """Add the positional encoding (no-op when pos is None)."""
        if pos is None:
            return tensor
        return tensor + pos

    def forward_post(self,
                     src,
                     src_mask: Optional[Tensor] = None,
                     src_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None):
        """Post-norm path: each sub-layer is followed by add & norm."""
        qk = self.with_pos_embed(src, pos)  # pos enters Q/K only, never V
        attn_out = self.self_attn(qk, qk, value=src, attn_mask=src_mask,
                                  key_padding_mask=src_key_padding_mask)[0]
        src = self.norm1(src + self.dropout1(attn_out))
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(src))))
        return self.norm2(src + self.dropout2(ffn_out))

    def forward_pre(self, src,
                    src_mask: Optional[Tensor] = None,
                    src_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None):
        """Pre-norm path: LayerNorm precedes each sub-layer."""
        normed = self.norm1(src)
        qk = self.with_pos_embed(normed, pos)
        attn_out = self.self_attn(qk, qk, value=normed, attn_mask=src_mask,
                                  key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(attn_out)
        normed = self.norm2(src)
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(normed))))
        return src + self.dropout2(ffn_out)

    def forward(self, src,
                src_mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        """Dispatch to the pre- or post-norm implementation."""
        branch = self.forward_pre if self.normalize_before else self.forward_post
        return branch(src, src_mask, src_key_padding_mask, pos)


class TransformerDecoderLayer(nn.Module):
    """Single decoder layer: self-attention over the queries, cross-attention
    to the encoder memory, then an FFN (pre- or post-norm)."""

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Position-wise feed-forward network.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        """Add the positional encoding (no-op when pos is None)."""
        if pos is None:
            return tensor
        return tensor + pos

    def forward_post(self, tgt, memory,
                     tgt_mask: Optional[Tensor] = None,
                     memory_mask: Optional[Tensor] = None,
                     tgt_key_padding_mask: Optional[Tensor] = None,
                     memory_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None,
                     query_pos: Optional[Tensor] = None):
        """Post-norm path: each sub-layer is followed by add & norm."""
        qk = self.with_pos_embed(tgt, query_pos)  # query_pos enters Q/K only
        self_out = self.self_attn(qk, qk, value=tgt, attn_mask=tgt_mask,
                                  key_padding_mask=tgt_key_padding_mask)[0]
        tgt = self.norm1(tgt + self.dropout1(self_out))
        cross_out = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
                                        key=self.with_pos_embed(memory, pos),
                                        value=memory, attn_mask=memory_mask,
                                        key_padding_mask=memory_key_padding_mask)[0]
        tgt = self.norm2(tgt + self.dropout2(cross_out))
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        return self.norm3(tgt + self.dropout3(ffn_out))

    def forward_pre(self, tgt, memory,
                    tgt_mask: Optional[Tensor] = None,
                    memory_mask: Optional[Tensor] = None,
                    tgt_key_padding_mask: Optional[Tensor] = None,
                    memory_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None,
                    query_pos: Optional[Tensor] = None):
        """Pre-norm path: LayerNorm precedes each sub-layer."""
        normed = self.norm1(tgt)
        qk = self.with_pos_embed(normed, query_pos)
        self_out = self.self_attn(qk, qk, value=normed, attn_mask=tgt_mask,
                                  key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(self_out)
        normed = self.norm2(tgt)
        cross_out = self.multihead_attn(query=self.with_pos_embed(normed, query_pos),
                                        key=self.with_pos_embed(memory, pos),
                                        value=memory, attn_mask=memory_mask,
                                        key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + self.dropout2(cross_out)
        normed = self.norm3(tgt)
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(normed))))
        return tgt + self.dropout3(ffn_out)

    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None,
                mask: Optional[Tensor] = None):
        """Dispatch to the pre- or post-norm path; `mask` is accepted only for
        interface compatibility with the Mamba decoder layers and is ignored."""
        branch = self.forward_pre if self.normalize_before else self.forward_post
        return branch(tgt, memory, tgt_mask, memory_mask,
                      tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)


class Attn_MambaEncoderLayer(nn.Module):
    """Encoder layer that keeps multi-head self-attention but uses a Mamba
    block where the usual feed-forward network would be."""

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False, mamba_d_state=16, mamba_conv=4):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # The FFN weights below are instantiated but never used by the forward
        # paths (the Mamba block stands in for the FFN); kept so the module /
        # parameter layout stays unchanged.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        # Mamba SSM block; device is hard-wired to CUDA.
        self.Mamba = Mamba(d_model, d_state=mamba_d_state, d_conv=mamba_conv, device='cuda')

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        """Add the positional encoding (no-op when pos is None)."""
        if pos is None:
            return tensor
        return tensor + pos

    def forward_post(self,
                     src,
                     src_mask: Optional[Tensor] = None,
                     src_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None):
        """Post-norm path: attention -> add & norm -> Mamba -> add & norm."""
        qk = self.with_pos_embed(src, pos)
        attn_out = self.self_attn(qk, qk, value=src, attn_mask=src_mask,
                                  key_padding_mask=src_key_padding_mask)[0]
        src = self.norm1(src + self.dropout1(attn_out))

        mamba_out = self.Mamba(src)  # Mamba replaces the FFN sub-layer
        return self.norm2(src + self.dropout2(mamba_out))

    def forward_pre(self, src,
                    src_mask: Optional[Tensor] = None,
                    src_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None):
        """Pre-norm path: LayerNorm precedes attention and the Mamba sub-layer."""
        normed = self.norm1(src)
        qk = self.with_pos_embed(normed, pos)
        attn_out = self.self_attn(qk, qk, value=normed, attn_mask=src_mask,
                                  key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(attn_out)

        mamba_out = self.Mamba(self.norm2(src))  # Mamba replaces the FFN
        return src + self.dropout2(mamba_out)

    def forward(self, src,
                src_mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        """Dispatch to the pre- or post-norm implementation."""
        branch = self.forward_pre if self.normalize_before else self.forward_post
        return branch(src, src_mask, src_key_padding_mask, pos)


class Attn_MambaDecoderLayer(nn.Module):
    """Decoder layer with self- and cross-attention that uses a Mamba block
    in place of the feed-forward network."""

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False, num_queries=100, mamba_d_state=16, mamba_conv=4):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # The FFN weights below are instantiated but never used by the forward
        # paths (Mamba replaces the FFN); kept so the module / parameter
        # layout stays unchanged.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        # Mamba SSM block; device is hard-wired to CUDA.
        self.Mamba = Mamba(d_model, d_state=mamba_d_state, d_conv=mamba_conv, device='cuda')

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        """Add the positional encoding (no-op when pos is None)."""
        if pos is None:
            return tensor
        return tensor + pos

    def forward_post(self, tgt, memory,
                     tgt_mask: Optional[Tensor] = None,
                     memory_mask: Optional[Tensor] = None,
                     tgt_key_padding_mask: Optional[Tensor] = None,
                     memory_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None,
                     query_pos: Optional[Tensor] = None):
        """Post-norm path: each sub-layer is followed by add & norm."""
        qk = self.with_pos_embed(tgt, query_pos)
        self_out = self.self_attn(qk, qk, value=tgt, attn_mask=tgt_mask,
                                  key_padding_mask=tgt_key_padding_mask)[0]
        tgt = self.norm1(tgt + self.dropout1(self_out))
        cross_out = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
                                        key=self.with_pos_embed(memory, pos),
                                        value=memory, attn_mask=memory_mask,
                                        key_padding_mask=memory_key_padding_mask)[0]
        tgt = self.norm2(tgt + self.dropout2(cross_out))

        mamba_out = self.Mamba(tgt)  # Mamba replaces the FFN sub-layer
        return self.norm3(tgt + self.dropout3(mamba_out))

    def forward_pre(self, tgt, memory,
                    tgt_mask: Optional[Tensor] = None,
                    memory_mask: Optional[Tensor] = None,
                    tgt_key_padding_mask: Optional[Tensor] = None,
                    memory_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None,
                    query_pos: Optional[Tensor] = None):
        """Pre-norm path: LayerNorm precedes each sub-layer."""
        normed = self.norm1(tgt)
        qk = self.with_pos_embed(normed, query_pos)
        self_out = self.self_attn(qk, qk, value=normed, attn_mask=tgt_mask,
                                  key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(self_out)
        normed = self.norm2(tgt)
        cross_out = self.multihead_attn(query=self.with_pos_embed(normed, query_pos),
                                        key=self.with_pos_embed(memory, pos),
                                        value=memory, attn_mask=memory_mask,
                                        key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + self.dropout2(cross_out)

        mamba_out = self.Mamba(self.norm3(tgt))  # Mamba replaces the FFN
        return tgt + self.dropout3(mamba_out)

    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None,
                mask: Optional[Tensor] = None):
        """Dispatch to the pre- or post-norm path; `mask` is accepted only for
        interface compatibility and is ignored."""
        branch = self.forward_pre if self.normalize_before else self.forward_post
        return branch(tgt, memory, tgt_mask, memory_mask,
                      tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)


class MambaFFNEncoderLayer(nn.Module):
    """Encoder layer where a Mamba block replaces self-attention, followed by
    the standard feed-forward network.

    Bug fix: forward() previously called ``self.forward_pre(src, pos)`` /
    ``self.forward_post(src, pos)``, which bound ``pos`` to the ``src_mask``
    parameter and left the real ``pos`` at None — the positional encoding was
    silently dropped on every call. Arguments are now passed by keyword.
    """

    def __init__(self, d_model, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False, mamba_d_state=16, mamba_conv=4):
        super().__init__()
        # Position-wise feed-forward network (used here, unlike in the
        # attention+Mamba layers).
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        # Mamba SSM block standing in for self-attention; device hard-wired to CUDA.
        self.Mamba = Mamba(d_model, d_state=mamba_d_state, d_conv=mamba_conv, device='cuda')

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        """Add the positional encoding (no-op when pos is None)."""
        return tensor if pos is None else tensor + pos

    def forward_post(self, src,
                     src_mask: Optional[Tensor] = None,  # unused: Mamba takes no attention mask
                     src_key_padding_mask: Optional[Tensor] = None,  # unused, kept for interface parity
                     pos: Optional[Tensor] = None):
        """Post-norm path: (src+pos) -> Mamba -> add & norm -> FFN -> add & norm.

        NOTE(review): the residual is taken around the pos-augmented tensor
        (src is rebound before the Mamba call), unlike the attention layers
        where pos touches only Q/K — confirm this is the intended design.
        """
        src = self.with_pos_embed(src, pos)
        src2 = self.Mamba(src)  # Mamba replaces self-attention

        src = src + self.dropout1(src2)  # residual around Mamba
        src = self.norm1(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))  # FFN

        src = src + self.dropout2(src2)  # residual around FFN
        src = self.norm2(src)
        return src

    def forward_pre(self, src,
                    src_mask: Optional[Tensor] = None,  # unused, kept for interface parity
                    src_key_padding_mask: Optional[Tensor] = None,  # unused
                    pos: Optional[Tensor] = None):
        """Pre-norm path: norm -> (+pos) -> Mamba -> add; norm -> FFN -> add.

        NOTE(review): here too src is rebound, so the residual base is the
        normalized, pos-augmented tensor rather than the raw input — confirm
        this is the intended design.
        """
        src2 = self.norm1(src)
        src = self.with_pos_embed(src2, pos)
        src2 = self.Mamba(src)  # Mamba replaces self-attention
        src = src + self.dropout1(src2)

        src2 = self.norm2(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))  # FFN
        src = src + self.dropout2(src2)
        return src

    def forward(self, src,
                src_mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        """Dispatch to the pre- or post-norm path.

        Arguments are forwarded by keyword: the old positional call
        ``forward_pre(src, pos)`` bound pos to src_mask and dropped the
        positional encoding entirely.
        """
        if self.normalize_before:
            return self.forward_pre(src, src_mask=src_mask,
                                    src_key_padding_mask=src_key_padding_mask, pos=pos)
        return self.forward_post(src, src_mask=src_mask,
                                 src_key_padding_mask=src_key_padding_mask, pos=pos)


class MambaFFNDecoderLayer(nn.Module):
    """Decoder layer built from Mamba blocks: Mamba1 replaces self-attention
    over the queries, Mamba2 replaces cross-attention (fed by a bilinearly
    resized, linearly projected copy of the encoder memory), and a standard
    FFN closes the layer.

    Fix: forward_post/forward_pre previously hard-coded ``nums_inter=24`` in
    the ``linear_interpolation`` call, which breaks ``self.linear_inter``
    (whose input size is ``self.nums_inter ** 2``) for any non-default
    ``nums_inter``; they now pass ``self.nums_inter``. Behavior is unchanged
    at the default value of 24.
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False, num_queries=100, nums_inter=24, mamba_d_state=16, mamba_conv=4):
        super().__init__()
        # Mamba SSM blocks (device hard-wired to CUDA): Mamba1 stands in for
        # self-attention, Mamba2 for cross-attention.
        self.Mamba1 = Mamba(d_model, d_state=mamba_d_state, d_conv=mamba_conv, device='cuda')
        self.Mamba2 = Mamba(d_model, d_state=mamba_d_state, d_conv=mamba_conv, device='cuda')
        # "Cross" path: resize memory to nums_inter x nums_inter tokens, then
        # map that token axis down to num_queries.
        self.nums_inter = nums_inter
        self.linear_inter = nn.Linear(self.nums_inter * self.nums_inter, num_queries)
        self.linear_cat = nn.Linear(d_model * 2, d_model)  # fuses concatenated memory+query channels
        # Position-wise feed-forward network.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        """Add the positional encoding (no-op when pos is None)."""
        return tensor if pos is None else tensor + pos

    def linear_interpolation(self, memory, mask, nums_inter=24):
        """Resize sequence-first memory to a fixed nums_inter**2 token grid.

        Uses the mask's height to recover the 2-D feature-map layout, resizes
        bilinearly, and returns a sequence-first tensor again:
        (H*W, N, C) -> (N, C, H, W) -> (N, C, n, n) -> (n*n, N, C).
        """
        if mask is None:
            raise ValueError("mask cannot be None")
        h_m = mask.shape[1]  # feature-map height, e.g. mask (N, 24, 36) -> 24
        _, bs, c = memory.shape
        memory_in = memory.permute(1, 2, 0).reshape(bs, c, h_m, -1)  # back to (N, C, H, W)
        memory_inter = F.interpolate(memory_in, size=(nums_inter, nums_inter), mode='bilinear', align_corners=False)
        memory_out = memory_inter.reshape(bs, c, -1).permute(2, 0, 1)  # (n*n, N, C)
        return memory_out

    def forward_post(self, tgt, memory,  # tgt: object queries; memory: encoder output
                     pos: Optional[Tensor] = None,
                     query_pos: Optional[Tensor] = None,
                     mask: Optional[Tensor] = None):
        """Post-norm path: Mamba1, memory fusion + Mamba2, then the FFN."""
        # NOTE(review): tgt is rebound here, so query_pos is baked into the
        # residual stream (and added once more below) — confirm intended.
        tgt = self.with_pos_embed(tgt, query_pos)
        tgt2 = self.Mamba1(tgt)  # Mamba1 replaces self-attention
        tgt = tgt + self.dropout1(tgt2)  # residual around Mamba1
        tgt = self.norm1(tgt)

        # Resize encoder memory to self.nums_inter**2 tokens (was a hard-coded 24).
        memory_inter = self.linear_interpolation(memory=self.with_pos_embed(memory, pos), mask=mask, nums_inter=self.nums_inter)
        memory2 = self.linear_inter(memory_inter.permute(1, 2, 0)).permute(2, 0, 1)  # token axis -> num_queries
        memory_cat_tgt = torch.cat((memory2, self.with_pos_embed(tgt, query_pos)), dim=2)  # concat channels: (Q, N, 2C)
        memory_tgt = self.linear_cat(memory_cat_tgt)  # fuse back to (Q, N, C)

        tgt2 = self.Mamba2(memory_tgt)  # Mamba2 replaces cross-attention
        tgt = memory_tgt + self.dropout2(tgt2)  # residual base is the fused tensor
        tgt = self.norm2(tgt)

        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))  # FFN
        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)
        return tgt

    def forward_pre(self, tgt, memory,
                    pos: Optional[Tensor] = None,
                    query_pos: Optional[Tensor] = None,
                    mask: Optional[Tensor] = None):
        """Pre-norm path: LayerNorm precedes each sub-layer."""
        tgt2 = self.norm1(tgt)
        # NOTE(review): tgt is rebound to the normalized+pos tensor, so the
        # residual below is not taken around the raw input — confirm intended.
        tgt = self.with_pos_embed(tgt2, query_pos)
        tgt2 = self.Mamba1(tgt)  # Mamba1 replaces self-attention
        tgt = tgt + self.dropout1(tgt2)

        memory_inter = self.linear_interpolation(memory=self.with_pos_embed(memory, pos), mask=mask, nums_inter=self.nums_inter)
        memory2 = self.linear_inter(memory_inter.permute(1, 2, 0)).permute(2, 0, 1)  # token axis -> num_queries
        memory_cat_tgt = torch.cat((memory2, self.with_pos_embed(tgt, query_pos)), dim=2)  # (Q, N, 2C)
        memory_tgt = self.linear_cat(memory_cat_tgt)  # (Q, N, C)
        tgt2 = self.norm2(memory_tgt)
        tgt2 = self.Mamba2(tgt2)  # Mamba2 replaces cross-attention
        tgt = memory_tgt + self.dropout2(tgt2)

        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))  # FFN
        tgt = tgt + self.dropout3(tgt2)
        return tgt

    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None,
                mask: Optional[Tensor] = None):
        """Dispatch to the pre-/post-norm path; the attention-style mask
        arguments are accepted for interface compatibility and ignored."""
        if self.normalize_before:
            return self.forward_pre(tgt, memory, pos=pos, query_pos=query_pos, mask=mask)
        return self.forward_post(tgt, memory, pos=pos, query_pos=query_pos, mask=mask)


class MambaEncoderLayer(nn.Module):
    """DETR-style encoder layer in which both sub-layers (self-attention and
    the FFN of a vanilla transformer encoder layer) are replaced by Mamba
    SSM blocks.

    Inputs are expected in DETR layout (seq_len, batch, d_model); `pos` is
    the positional encoding added before the first Mamba block.
    """

    def __init__(self, d_model, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False, mamba_d_state=16, mamba_conv=4):
        super().__init__()
        # Two Mamba blocks: #1 stands in for self-attention, #2 for the FFN.
        # NOTE(review): device='cuda' is hard-coded, so the layer cannot be
        # built on a CPU-only host — consider making the device configurable.
        self.Mamba1 = Mamba(d_model, d_state=mamba_d_state, d_conv=mamba_conv, device='cuda')
        self.Mamba2 = Mamba(d_model, d_state=mamba_d_state, d_conv=mamba_conv, device='cuda')

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        # `dim_feedforward` / `activation` are accepted only for interface
        # parity with TransformerEncoderLayer; they are unused here.
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        """Add the positional encoding to `tensor` (no-op when pos is None)."""
        return tensor if pos is None else tensor + pos

    def forward_post(self, src,
                     src_mask: Optional[Tensor] = None,  # unused; kept for interface parity
                     src_key_padding_mask: Optional[Tensor] = None,  # unused; kept for interface parity
                     pos: Optional[Tensor] = None):
        """Post-norm path: (Mamba -> residual -> norm) twice."""
        src = self.with_pos_embed(src, pos)  # inject positional encoding
        src2 = self.Mamba1(src)  # Mamba replacing self-attention
        src = src + self.dropout1(src2)  # residual around Mamba-1
        src = self.norm1(src)

        src2 = self.Mamba2(src)  # Mamba replacing the FFN sub-layer
        src = src + self.dropout2(src2)  # residual around Mamba-2
        src = self.norm2(src)
        return src

    def forward_pre(self, src,
                    src_mask: Optional[Tensor] = None,  # unused; kept for interface parity
                    src_key_padding_mask: Optional[Tensor] = None,  # unused; kept for interface parity
                    pos: Optional[Tensor] = None):
        """Pre-norm path: (norm -> Mamba -> residual) twice."""
        src2 = self.norm1(src)
        src = self.with_pos_embed(src2, pos)  # inject positional encoding
        src2 = self.Mamba1(src)  # Mamba replacing self-attention
        src = src + self.dropout1(src2)  # residual around Mamba-1

        src2 = self.norm2(src)
        src2 = self.Mamba2(src2)  # Mamba replacing the FFN sub-layer
        src = src + self.dropout2(src2)  # residual around Mamba-2
        return src

    def forward(self, src,
                src_mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        """Dispatch to the pre-norm or post-norm implementation.

        Bug fix: the original called `forward_pre(src, pos)` /
        `forward_post(src, pos)`, binding `pos` positionally to the
        `src_mask` parameter and silently dropping the positional encoding.
        All arguments are now forwarded into their correct slots.
        """
        if self.normalize_before:
            return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
        return self.forward_post(src, src_mask, src_key_padding_mask, pos)


class MambaDecoderLayer(nn.Module):
    """DETR-style decoder layer built entirely from Mamba SSM blocks.

    Mamba1 replaces decoder self-attention, Mamba2 replaces cross-attention
    (with the encoder memory resampled to a fixed nums_inter x nums_inter
    grid and fused into the queries), and Mamba3 replaces the FFN.

    Expects DETR layout (seq_len, batch, d_model) for `tgt` and `memory`;
    `mask` is assumed to be the (batch, H, W) padding mask used to recover
    the encoder feature-map size — TODO confirm against the caller.
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False, num_queries=100, nums_inter=24, mamba_d_state=16, mamba_conv=4):
        super().__init__()
        # Three Mamba blocks: self-attention, cross-attention and FFN slots.
        # NOTE(review): device='cuda' is hard-coded, so the layer cannot be
        # built on a CPU-only host — consider making the device configurable.
        self.Mamba1 = Mamba(d_model, d_state=mamba_d_state, d_conv=mamba_conv, device='cuda')
        self.Mamba2 = Mamba(d_model, d_state=mamba_d_state, d_conv=mamba_conv, device='cuda')
        self.Mamba3 = Mamba(d_model, d_state=mamba_d_state, d_conv=mamba_conv, device='cuda')

        # Cross branch: memory is interpolated to nums_inter x nums_inter
        # tokens, projected to num_queries tokens, then fused with the queries.
        self.nums_inter = nums_inter
        self.linear_inter = nn.Linear(self.nums_inter * self.nums_inter, num_queries)
        self.linear_cat = nn.Linear(d_model * 2, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        # `nhead` / `dim_feedforward` / `activation` are accepted only for
        # interface parity with TransformerDecoderLayer; they are unused here.
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        """Add the positional encoding to `tensor` (no-op when pos is None)."""
        return tensor if pos is None else tensor + pos

    def linear_interpolation(self, memory, mask, nums_inter=24):
        """Resample flattened encoder memory to a nums_inter x nums_inter grid.

        `mask.shape[1]` supplies the feature-map height needed to un-flatten
        memory from (H*W, B, C) back to (B, C, H, W) before interpolating.
        Returns a tensor of shape (nums_inter**2, B, C).
        """
        if mask is None:
            raise ValueError("mask cannot be None")
        h_m = mask.shape[1]  # feature-map height H from the (B, H, W) mask
        _, bs, c = memory.shape
        memory_in = memory.permute(1, 2, 0).reshape(bs, c, h_m, -1)  # (H*W, B, C) -> (B, C, H, W)
        memory_inter = F.interpolate(memory_in, size=(nums_inter, nums_inter), mode='bilinear', align_corners=False)
        memory_out = memory_inter.reshape(bs, c, -1).permute(2, 0, 1)  # (B, C, n, n) -> (n*n, B, C)
        return memory_out

    def forward_post(self, tgt, memory,  # tgt: decoder queries; memory: encoder output
                     pos: Optional[Tensor] = None,
                     query_pos: Optional[Tensor] = None,
                     mask: Optional[Tensor] = None):
        """Post-norm path: Mamba self-mix, cross fusion, Mamba FFN."""
        tgt = self.with_pos_embed(tgt, query_pos)
        tgt2 = self.Mamba1(tgt)  # Mamba replacing self-attention
        tgt = tgt + self.dropout1(tgt2)  # residual around Mamba-1
        tgt = self.norm1(tgt)

        # Bug fix: use the configured self.nums_inter instead of a hard-coded
        # 24 — linear_inter is sized nums_inter**2, so any non-default value
        # previously caused a shape mismatch.
        memory_inter = self.linear_interpolation(memory=self.with_pos_embed(memory, pos), mask=mask, nums_inter=self.nums_inter)
        memory2 = self.linear_inter(memory_inter.permute(1, 2, 0)).permute(2, 0, 1)  # (n*n, B, C) -> (Q, B, C)
        memory_cat_tgt = torch.cat((memory2, self.with_pos_embed(tgt, query_pos)), dim=2)  # channel concat -> (Q, B, 2C)
        memory_tgt = self.linear_cat(memory_cat_tgt)  # fuse back to (Q, B, C)

        tgt2 = self.Mamba2(memory_tgt)  # Mamba replacing cross-attention
        tgt = tgt + self.dropout2(tgt2)  # residual around Mamba-2
        tgt = self.norm2(tgt)

        tgt2 = self.Mamba3(tgt)  # Mamba replacing the FFN sub-layer
        tgt = tgt + self.dropout3(tgt2)  # residual around Mamba-3
        tgt = self.norm3(tgt)
        return tgt

    def forward_pre(self, tgt, memory,
                    pos: Optional[Tensor] = None,
                    query_pos: Optional[Tensor] = None,
                    mask: Optional[Tensor] = None):
        """Pre-norm path: norm first, then Mamba self-mix, cross fusion, Mamba FFN."""
        tgt2 = self.norm1(tgt)
        tgt = self.with_pos_embed(tgt2, query_pos)
        tgt2 = self.Mamba1(tgt)  # Mamba replacing self-attention
        tgt = tgt + self.dropout1(tgt2)  # residual around Mamba-1

        # Bug fix: same self.nums_inter fix as in forward_post.
        memory_inter = self.linear_interpolation(memory=self.with_pos_embed(memory, pos), mask=mask, nums_inter=self.nums_inter)
        memory2 = self.linear_inter(memory_inter.permute(1, 2, 0)).permute(2, 0, 1)  # (n*n, B, C) -> (Q, B, C)
        memory_cat_tgt = torch.cat((memory2, self.with_pos_embed(tgt, query_pos)), dim=2)  # channel concat -> (Q, B, 2C)
        memory_tgt = self.linear_cat(memory_cat_tgt)  # fuse back to (Q, B, C)
        tgt2 = self.norm2(memory_tgt)  # pre-norm for the cross Mamba block
        tgt2 = self.Mamba2(tgt2)
        tgt = tgt + self.dropout2(tgt2)  # residual around Mamba-2

        tgt2 = self.norm3(tgt)  # pre-norm for Mamba-3
        tgt2 = self.Mamba3(tgt2)  # Mamba replacing the FFN sub-layer
        tgt = tgt + self.dropout3(tgt2)  # residual around Mamba-3
        return tgt

    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None,
                mask: Optional[Tensor] = None):
        """Dispatch to the pre-norm or post-norm implementation.

        The *_mask / *_key_padding_mask arguments are accepted for interface
        parity with attention-based layers but are unused here.
        """
        if self.normalize_before:
            return self.forward_pre(tgt, memory, pos=pos, query_pos=query_pos, mask=mask)
        return self.forward_post(tgt, memory, pos=pos, query_pos=query_pos, mask=mask)


class MambaformerDecoderLayer(nn.Module):

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False, num_queries=100, mamba_d_state=16, mamba_conv=4):
        super().__init__()
        # Implementation of attn
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of mamba
        self.Mamba1 = Mamba(d_model, d_state=mamba_d_state, d_conv=mamba_conv, device='cuda')  # 此处添加mamba
        self.Mamba2 = Mamba(d_model, d_state=mamba_d_state, d_conv=mamba_conv, device='cuda')  # 此处添加mamba
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)  # d_model=256，dim_feedforward=2048
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)  # dim_feedforward=2048，d_model=256

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        return tensor if pos is None else tensor + pos

    def forward_post(self, tgt, memory,  # tgt就是query（原皮），memory是encoder输出
                     memory_mask: Optional[Tensor] = None,
                     memory_key_padding_mask: Optional[Tensor] = None,  # memory_key_padding_mask为特征提取的mask
                     pos: Optional[Tensor] = None,  # pos是位置编码
                     query_pos: Optional[Tensor] = None):  # query_pos为query的pos
        tgt2 = self.Mamba1(tgt)  # mamba结构替换self_attn
        tgt = tgt + self.dropout1(tgt2)  # mamba1的残差
        tgt = self.norm1(tgt)  # mamba1的norm

        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
                                   key=self.with_pos_embed(memory, pos),
                                   value=memory, attn_mask=memory_mask,
                                   key_padding_mask=memory_key_padding_mask)[0]  # 交叉注意力
        tgt = tgt + self.dropout2(tgt2)  # 交叉注意力的残差
        tgt = self.norm2(tgt)  # 交叉注意力的norm

        tgt2 = self.Mamba2(tgt)  # mamba结构替换FFN层
        tgt = tgt + self.dropout3(tgt2)  # mamba2的残差
        tgt = self.norm3(tgt)  # mamba2的norm
        return tgt

    def forward_pre(self, tgt, memory,
                    memory_mask: Optional[Tensor] = None,
                    memory_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None,
                    query_pos: Optional[Tensor] = None):
        tgt2 = self.norm1(tgt)  # mamba1的norm
        tgt2 = self.Mamba1(tgt2)  # mamba结构替换self_attn
        tgt = tgt + self.dropout1(tgt2)  # mamba1的残差

        tgt2 = self.norm2(tgt)  # 交叉注意力的norm
        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
                                   key=self.with_pos_embed(memory, pos),
                                   value=memory, attn_mask=memory_mask,
                                   key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + self.dropout2(tgt2)  # 交叉注意力的残差

        tgt2 = self.norm3(tgt)   # mamba2的norm
        tgt2 = self.Mamba2(tgt2)
        tgt = tgt + self.dropout3(tgt2)  # mamba2的残差
        return tgt

    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None,
                mask: Optional[Tensor] = None):
        if self.normalize_before:
            return self.forward_pre(tgt, memory, memory_mask,
                                    memory_key_padding_mask, pos, query_pos)
        return self.forward_post(tgt, memory, memory_mask,
                                 memory_key_padding_mask, pos, query_pos)


class MambaTransFFNDecoderLayer(nn.Module):

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False, num_queries=100, mamba_d_state=16, mamba_conv=4):
        super().__init__()
        # Implementation of attn
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of mamba
        self.Mamba1 = Mamba(d_model, d_state=mamba_d_state, d_conv=mamba_conv, device='cuda')  # 此处添加mamba
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)  # d_model=256，dim_feedforward=2048
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)  # dim_feedforward=2048，d_model=256

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        return tensor if pos is None else tensor + pos

    def forward_post(self, tgt, memory,  # tgt就是query（原皮），memory是encoder输出
                     memory_mask: Optional[Tensor] = None,
                     memory_key_padding_mask: Optional[Tensor] = None,  # memory_key_padding_mask为特征提取的mask
                     pos: Optional[Tensor] = None,  # pos是位置编码
                     query_pos: Optional[Tensor] = None):  # query_pos为query的pos
        tgt2 = self.Mamba1(tgt)  # mamba结构替换self_attn
        tgt = tgt + self.dropout1(tgt2)  # mamba1的残差
        tgt = self.norm1(tgt)  # mamba1的norm

        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
                                   key=self.with_pos_embed(memory, pos),
                                   value=memory, attn_mask=memory_mask,
                                   key_padding_mask=memory_key_padding_mask)[0]  # 交叉注意力
        tgt = tgt + self.dropout2(tgt2)  # 交叉注意力的残差
        tgt = self.norm2(tgt)  # 交叉注意力的norm

        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))  # FFN层
        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)
        return tgt

    def forward_pre(self, tgt, memory,
                    memory_mask: Optional[Tensor] = None,
                    memory_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None,
                    query_pos: Optional[Tensor] = None):
        tgt2 = self.norm1(tgt)  # mamba1的norm
        tgt2 = self.Mamba1(tgt2)  # mamba结构替换self_attn
        tgt = tgt + self.dropout1(tgt2)  # mamba1的残差

        tgt2 = self.norm2(tgt)  # 交叉注意力的norm
        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
                                   key=self.with_pos_embed(memory, pos),
                                   value=memory, attn_mask=memory_mask,
                                   key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + self.dropout2(tgt2)  # 交叉注意力的残差

        tgt2 = self.norm3(tgt)  # FFN层
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt

    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None,
                mask: Optional[Tensor] = None):
        if self.normalize_before:
            return self.forward_pre(tgt, memory, memory_mask,
                                    memory_key_padding_mask, pos, query_pos)
        return self.forward_post(tgt, memory, memory_mask,
                                 memory_key_padding_mask, pos, query_pos)



def _get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])


def build_transformer(args):
    """Build either a Mambaformer or a plain Transformer from parsed CLI args.

    `args.mamba_in` selects the Mamba-based model, which additionally needs
    `args.num_queries`; all other hyper-parameters are shared.
    """
    shared_kwargs = dict(
        d_model=args.hidden_dim,            # model width (default 256)
        dropout=args.dropout,
        nhead=args.nheads,                  # number of attention heads
        dim_feedforward=args.dim_feedforward,  # FFN hidden width
        num_encoder_layers=args.enc_layers,
        num_decoder_layers=args.dec_layers,
        normalize_before=args.pre_norm,
        return_intermediate_dec=True,
    )
    if args.mamba_in:
        return Mambaformer(num_queries=args.num_queries, **shared_kwargs)
    return Transformer(**shared_kwargs)



def _get_activation_fn(activation):
    """Return an activation function given a string"""
    if activation == "relu":
        return F.relu
    if activation == "gelu":
        return F.gelu
    if activation == "glu":
        return F.glu
    if activation == "silu":
        return F.silu
    raise RuntimeError(F"activation should be relu/gelu, not {activation}.")




