# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
Implementation of VisionTransformer
Based on timm and DeiT code bases
https://github.com/rwightman/pytorch-image-models/tree/master/timm
https://github.com/facebookresearch/deit/
"""
import math

import torch
import torch.nn as nn
from functools import partial

from timm.models.vision_transformer import _cfg, Mlp
from timm.models.registry import register_model
from timm.models.layers import DropPath, trunc_normal_, to_2tuple
from entmax import sparsemax, entmax15 

from einops import rearrange, repeat

class PositionalEncodingFourier(nn.Module):
    """
    Fourier-feature (sine/cosine) positional encoding matching the one used in
    the "Attention Is All You Need" paper. The implementation builds on DETR code
    https://github.com/facebookresearch/detr/blob/master/models/position_encoding.py

    Produces a (B, dim, H, W) positional map for a B x H x W grid of tokens.
    """

    def __init__(self, hidden_dim=32, dim=768, temperature=10000):
        super().__init__()
        # 1x1 conv projecting the concatenated (y, x) features
        # (2 * hidden_dim channels) up to the token embedding dimension.
        self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1)
        self.scale = 2 * math.pi    # coordinates are normalized to [0, 2*pi]
        self.temperature = temperature
        self.hidden_dim = hidden_dim
        self.dim = dim

    def forward(self, B, H, W):
        # All-False mask: every grid position is a real token.
        mask = torch.zeros(B, H, W).bool().to(self.token_projection.weight.device)
        not_mask = ~mask
        # Cumulative sums turn the boolean grid into 1-based (row, col) coordinates.
        y_embed = not_mask.cumsum(1, dtype=torch.float32)
        x_embed = not_mask.cumsum(2, dtype=torch.float32)
        eps = 1e-6
        # Normalize each axis by its last value so coordinates span [0, 2*pi].
        y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
        x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale

        # Geometric frequency ladder; // 2 pairs channels so each sin/cos
        # couple shares one frequency.
        dim_t = torch.arange(self.hidden_dim, dtype=torch.float32, device=mask.device)
        dim_t = self.temperature ** (2 * (dim_t // 2) / self.hidden_dim)

        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        # Interleave sin on even channels and cos on odd channels.
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(),
                             pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(),
                             pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        # (B, H, W, 2*hidden_dim) -> (B, 2*hidden_dim, H, W) -> (B, dim, H, W)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        pos = self.token_projection(pos)
        return pos


def conv3x3(in_planes, out_planes, stride=1):
    """Strided 3x3 conv (no bias) followed by SyncBatchNorm."""
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    norm = nn.SyncBatchNorm(out_planes)
    return torch.nn.Sequential(conv, norm)


class ConvPatchEmbed(nn.Module):
    """Image to Patch Embedding using multiple strided 3x3 convolutions.

    Args:
        img_size (int or tuple): input image size.
        patch_size (int or tuple): patch size; only 8 or 16 are supported.
        in_chans (int): number of input image channels.
        embed_dim (int): output embedding dimension per token.

    Raises:
        ValueError: if patch_size is not 8 or 16.
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = num_patches

        if patch_size[0] == 16:
            # Four stride-2 stages -> 16x spatial downsampling.
            # Use in_chans (previously hard-coded to 3) so non-RGB inputs work.
            self.proj = torch.nn.Sequential(
                conv3x3(in_chans, embed_dim // 8, 2),
                nn.GELU(),
                conv3x3(embed_dim // 8, embed_dim // 4, 2),
                nn.GELU(),
                conv3x3(embed_dim // 4, embed_dim // 2, 2),
                nn.GELU(),
                conv3x3(embed_dim // 2, embed_dim, 2),
            )
        elif patch_size[0] == 8:
            # Three stride-2 stages -> 8x spatial downsampling.
            self.proj = torch.nn.Sequential(
                conv3x3(in_chans, embed_dim // 4, 2),
                nn.GELU(),
                conv3x3(embed_dim // 4, embed_dim // 2, 2),
                nn.GELU(),
                conv3x3(embed_dim // 2, embed_dim, 2),
            )
        else:
            # Was `raise("...")`, which raises a TypeError (a str is not an
            # exception); raise a proper ValueError instead.
            raise ValueError("For convolutional projection, patch size has to be in [8, 16]")

    def forward(self, x, padding_size=None):
        # x: (B, C, H, W) -> tokens (B, Hp*Wp, embed_dim) plus the token grid size.
        B, C, H, W = x.shape
        x = self.proj(x)
        Hp, Wp = x.shape[2], x.shape[3]
        # (B, embed_dim, Hp, Wp) -> (B, Hp*Wp, embed_dim)
        x = x.flatten(2).transpose(1, 2)

        return x, (Hp, Wp)


class ClassAttention(nn.Module):
    """Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239

    Only the CLS token (index 0) attends to the full sequence; all other
    tokens are passed through unchanged.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        # x: (B, N, C), CLS token at position 0.
        B, N, C = x.shape
        head_dim = C // self.num_heads
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, head_dim)
        q, k, v = qkv.permute(2, 0, 3, 1, 4).unbind(0)  # each (B, H, N, head_dim)

        # Attention scores of the CLS query against every key: (B, H, N).
        cls_q = q[:, :, 0:1]
        scores = (cls_q * k).sum(dim=-1) * self.scale
        weights = self.attn_drop(scores.softmax(dim=-1))

        # Weighted sum of values -> updated CLS token, (B, 1, C).
        cls_out = (weights.unsqueeze(2) @ v).transpose(1, 2).reshape(B, 1, C)
        cls_out = self.proj_drop(self.proj(cls_out))

        # Re-attach the untouched patch tokens behind the new CLS token.
        return torch.cat([cls_out, x[:, 1:]], dim=1)
    

class ClassAttentionBlock(nn.Module):
    """Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239

    Residual block around ClassAttention plus an MLP that is applied to the
    CLS token only. Optional LayerScale (eta) and stochastic depth.
    """

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0.,
                 attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=None,
                 tokens_norm=False):
        super().__init__()
        self.norm1 = norm_layer(dim)

        self.attn = ClassAttention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop,
            proj_drop=drop
        )

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer,
                       drop=drop)

        if eta is not None:     # LayerScale Initialization (no layerscale when None)
            self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
            self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
        else:
            # Plain scalars: multiplying by 1.0 is a no-op (no LayerScale).
            self.gamma1, self.gamma2 = 1.0, 1.0

        # FIXME: A hack for models pre-trained with layernorm over all the tokens not just the CLS
        self.tokens_norm = tokens_norm

    def forward(self, x, H, W, mask=None):
        # x: (B, 1 + N, C) with CLS at index 0. H, W and mask are accepted for
        # interface parity with Block but are unused here.
        x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x)))
        if self.tokens_norm:
            # Normalize every token (matches models pre-trained that way).
            x = self.norm2(x)
        else:
            # Normalize only the CLS token — NOTE(review): in-place slice
            # assignment on the residual output; kept as-is, rewriting could
            # alter autograd behavior.
            x[:, 0:1] = self.norm2(x[:, 0:1])
        x_res = x
        cls_token = x[:, 0:1]
        # MLP is applied to the CLS token only.
        cls_token = self.gamma2 * self.mlp(cls_token)
        # NOTE(review): the concatenation re-includes the patch tokens, so the
        # residual below effectively doubles them (when drop_path keeps the
        # branch). This mirrors the upstream XCiT implementation — presumably
        # intentional; confirm before changing.
        x = torch.cat([cls_token, x[:, 1:]], dim=1)
        x = x_res + self.drop_path(x)
        return x

class Attention(nn.Module):
    """Standard multi-head self-attention (as in ViT/DeiT).

    Args:
        dim: total embedding dimension; must be divisible by num_heads.
        num_heads: number of attention heads.
        qkv_bias: add a bias term to the qkv projection.
        qk_scale: optional override of the default head_dim ** -0.5 scaling.
        attn_drop: dropout applied to the attention weights.
        proj_drop: dropout applied after the output projection.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        assert dim % num_heads == 0, 'dim should be divisible by num_heads'
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Honor qk_scale when provided, consistent with ClassAttention/XCA;
        # previously the argument was accepted but silently ignored.
        self.scale = qk_scale or head_dim ** -0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        # x: (B, N, C) -> (B, N, C)
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)   # make torchscript happy (cannot use tensor as tuple)

        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

class XCA(nn.Module):
    """ Cross-Covariance Attention (XCA) operation where the channels are updated using a weighted
     sum. The weights are obtained from the (softmax normalized) Cross-covariance
    matrix (Q^T K \\in d_h \\times d_h)
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        # Learnable per-head temperature scaling the channel-covariance scores.
        self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        # x: (B, N, C) -> (B, N, C); attention acts across channels, not tokens.
        B, N, C = x.shape
        head_dim = C // self.num_heads
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, head_dim)
        # Transpose so each of q, k, v is (B, heads, head_dim, N).
        q, k, v = (t.transpose(-2, -1) for t in qkv.permute(2, 0, 3, 1, 4).unbind(0))

        # L2-normalize along the token axis before forming the covariance.
        q = torch.nn.functional.normalize(q, dim=-1)
        k = torch.nn.functional.normalize(k, dim=-1)

        # (B, heads, head_dim, head_dim) channel-to-channel attention map.
        scores = q @ k.transpose(-2, -1) * self.temperature
        weights = self.attn_drop(scores.softmax(dim=-1))

        # Apply to values and fold heads back into the channel dimension.
        out = (weights @ v).permute(0, 3, 1, 2).reshape(B, N, C)
        out = self.proj(out)
        return self.proj_drop(out)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Keep the temperature out of weight decay.
        return {'temperature'}

class AttentionTSSA(nn.Module):
    """Token Statistics Self-Attention (TSSA).

    Linear-complexity attention variant: rather than pairwise token-token
    similarities, it computes per-head token statistics ("Pi" / second
    moments of the projected tokens) and reweights tokens with them.
    """
    def __init__(self, dim, num_heads = 8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.heads = num_heads
        # Softmax over dim=1 — for input of shape (b, heads, n) this is a
        # distribution over *heads* for every token, not over tokens.
        self.attend = nn.Softmax(dim = 1)
        self.attn_drop = nn.Dropout(attn_drop)
        # Single shared projection (no separate q/k/v), dim -> dim.
        self.qkv = nn.Linear(dim, dim, bias=qkv_bias)
        self.temp = nn.Parameter(torch.ones(num_heads, 1)) # per-head temperature
        self.to_out = nn.Sequential(
            nn.Linear(dim, dim),
            nn.Dropout(proj_drop)
        )
    
    def forward(self, x): # x: [B, N, dim], e.g. [B, 196, 384]
        # nn.Linear acts on the last dim only; split it into heads so that
        # dim = h * d (e.g. 384 = 8 * 48).
        w = rearrange(self.qkv(x), 'b n (h d) -> b h n d', h = self.heads)
        b, h, N, d = w.shape # w: (b, heads, n, head_dim)
        # L2-normalize across the token axis.
        w_normed = torch.nn.functional.normalize(w, dim=-2) # (b, h, n, d)
        w_sq = w_normed ** 2  # (b, h, n, d)
        # Pi from Eq. 10 in the paper: sum over the feature dim -> (b, h, n),
        # scale by the per-head temperature, softmax across heads (dim=1).
        Pi = self.attend(torch.sum(w_sq, dim=-1) * self.temp) # Pi: (b, h, n)
        # Pi-weighted second moment of w per head:
        # normalized Pi (b, h, 1, n) @ w**2 (b, h, n, d) -> dots (b, h, 1, d)
        dots = torch.matmul((Pi / (Pi.sum(dim=-1, keepdim=True) + 1e-8)).unsqueeze(-2), w ** 2)
        attn = 1. / (1 + dots) # per-feature gate, (b, h, 1, d)
        attn = self.attn_drop(attn)
        # Reweight tokens by their head membership Pi (broadcast over d) and
        # the gate (broadcast over n); the leading minus sign follows the
        # paper's update rule.
        out = - torch.mul(w.mul(Pi.unsqueeze(-1)), attn)
        out = rearrange(out, 'b h n d -> b n (h d)') # back to (b, n, dim)
        return self.to_out(out) # (b, n, dim)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Exclude the temperature parameter from weight decay.
        return {'temp'}
    
    
class AttentionMSSA(nn.Module):
    """Multi-head Sparse Self-Attention (MSSA).

    Fits the Token Statistics Self-Attention architecture: a shared key
    projection is compared with itself (K K^T) and the scores are sparsified
    with sparsemax / entmax15 instead of softmax.

    Args:
        dim: total embedding dimension.
        num_heads: number of heads.
        head_dim: per-head dimension; dim must equal num_heads * head_dim.
        sparsity_type: 'sparsemax' or 'entmax15'.
        dropout: dropout after the output projection.
        qkv_bias, qk_scale, attn_drop, proj_drop: accepted for interface
            parity with the other attention classes; unused here.

    Raises:
        ValueError: if sparsity_type is not supported.
    """
    def __init__(self, dim=384, num_heads=8, head_dim=48, sparsity_type='sparsemax', dropout=0.1, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.head_dim = head_dim
        self.sparsity_type = sparsity_type

        # Dimensions must be consistent.
        assert dim == num_heads * head_dim, f"dim {dim} != num_heads {num_heads} * head_dim {head_dim}"

        # Key/value projections only — no separate query (CRATE-style design).
        self.key_proj = nn.Linear(dim, dim, bias=False)
        self.value_proj = nn.Linear(dim, dim, bias=False)

        # Output projection.
        self.output_proj = nn.Linear(dim, dim)
        self.dropout = nn.Dropout(dropout)

        # Scaling factor for the similarity scores.
        self.scale = head_dim ** -0.5

        # Select the sparsification function.
        if sparsity_type == 'sparsemax':
            self.sparse_func = sparsemax
        elif sparsity_type == 'entmax15':
            self.sparse_func = entmax15
        else:
            raise ValueError(f"Unsupported sparsity type: {sparsity_type}")

    def forward(self, x, return_attention=False):
        """
        Args:
            x: [batch_size, seq_len, dim] e.g., [batch_size, 196, 384]
            return_attention: whether to also return the attention matrix
        Returns:
            output: [batch_size, seq_len, dim]
            attention_weights: [batch_size, num_heads, seq_len, seq_len] (if return_attention=True)
        """
        batch_size, seq_len, _ = x.shape

        # Project into key and value spaces.
        k = self.key_proj(x)  # [batch_size, seq_len, dim]
        v = self.value_proj(x)  # [batch_size, seq_len, dim]

        # Reshape into multi-head layout: [batch_size, num_heads, seq_len, head_dim].
        k = k.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
        v = v.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)

        # Similarity of every token with every token, using keys as the
        # query side as well (K K^T).
        attn_scores = torch.matmul(k, k.transpose(-2, -1)) * self.scale  # [batch_size, num_heads, seq_len, seq_len]

        # Sparsify over the sequence dimension.
        if self.sparsity_type in ['sparsemax', 'entmax15']:
            attn_weights = self.sparse_func(attn_scores, dim=-1)
        else:
            # Fallback: plain softmax. Fixed: the previous code called
            # `F.softmax` but `F` was never imported, so this branch would
            # raise NameError if ever reached.
            attn_weights = torch.nn.functional.softmax(attn_scores, dim=-1)

        # Apply attention to the values.
        attn_output = torch.matmul(attn_weights, v)  # [batch_size, num_heads, seq_len, head_dim]

        # Merge heads back into the channel dimension.
        attn_output = attn_output.transpose(1, 2).contiguous().view(
            batch_size, seq_len, self.dim)  # [batch_size, seq_len, dim]

        # Output projection.
        output = self.output_proj(attn_output)
        output = self.dropout(output)

        if return_attention:
            return output, attn_weights
        return output

class Block(nn.Module):
    """Standard pre-norm transformer block: attention + MLP, each with a
    residual connection, LayerScale (gamma) and stochastic depth.

    Args:
        dim: embedding dimension.
        num_heads: number of attention heads.
        mlp_ratio: MLP hidden dim = dim * mlp_ratio.
        qkv_bias, qk_scale, drop, attn_drop, drop_path: usual regularization knobs.
        act_layer, norm_layer: activation / normalization classes.
        attn_layer: attention module class (e.g. AttentionTSSA, AttentionMSSA, XCA).
        num_tokens: accepted for interface compatibility; unused here.
        eta: LayerScale init value; None disables LayerScale.
    """
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0.,
                 attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm,
                 attn_layer=AttentionTSSA, num_tokens=196, eta=None):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = attn_layer(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop,
            proj_drop=drop
        )
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)

        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer,
                       drop=drop)

        # LayerScale; eta=None now disables it (scalar 1.0 multipliers),
        # matching ClassAttentionBlock. Previously the declared default
        # eta=None crashed with a TypeError on `eta * torch.ones(dim)`.
        if eta is not None:
            self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
            self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
        else:
            self.gamma1, self.gamma2 = 1.0, 1.0

    def forward(self, x, H, W):
        # x: (B, N, C); H, W are unused but kept for interface parity with
        # the other blocks called in forward_features.
        x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x)))
        x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x)))
        return x


class VisionTransformer(nn.Module):
    """
    Based on timm and DeiT code bases
    https://github.com/rwightman/pytorch-image-models/tree/master/timm
    https://github.com/facebookresearch/deit/

    Pipeline: convolutional patch embedding -> optional Fourier positional
    encoding -> `mssa_depth` MSSA blocks -> TSSA (or other attn_layer) blocks
    -> class-attention blocks that fold patch tokens into the CLS token.
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768,
                 depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None, attn_layer=AttentionTSSA,
                 cls_attn_layers=2, use_pos=True, patch_proj='linear', eta=None, tokens_norm=False):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            norm_layer: (nn.Module): normalization layer
            attn_layer: attention module class used by the main trunk blocks
            cls_attn_layers: (int) Depth of Class attention layers
            use_pos: (bool) whether to use positional encoding
            patch_proj: unused; kept for interface compatibility
            eta: (float) layerscale initialization value
            tokens_norm: (bool) Whether to normalize all tokens or just the cls_token in the CA
        """
        super().__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        # e.g. img_size: 224; embed_dim: 384; patch_size: 16
        self.patch_embed = ConvPatchEmbed(img_size=img_size, embed_dim=embed_dim,
                                          patch_size=patch_size)

        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)
        # Constant per-block drop-path rate (no linear stochastic-depth decay).
        dpr = [drop_path_rate for i in range(depth)]
        self.mssa_depth = 2 # number of leading MSSA blocks
        # Number of parallel branches — currently unused; see the commented-out
        # multi-branch experiment in forward_features.
        self.branches_num = 5
        # Two MSSA (sparse attention) blocks first, intended to capture
        # richer information before the TSSA trunk.
        self.mssas = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
                qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i],
                norm_layer=norm_layer, attn_layer=AttentionMSSA, num_tokens=num_patches, eta=eta)
            for i in range(self.mssa_depth)
        ])
        # Main trunk uses the TSSA linear-attention mechanism (attn_layer).
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
                qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i],
                norm_layer=norm_layer, attn_layer=attn_layer, num_tokens=num_patches, eta=eta)
            for i in range(depth - self.mssa_depth)])

        self.cls_attn_blocks = nn.ModuleList([
            ClassAttentionBlock(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
                qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, norm_layer=norm_layer,
                eta=eta, tokens_norm=tokens_norm)
            for i in range(cls_attn_layers)])
        self.norm = norm_layer(embed_dim)
        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()

        self.pos_embeder = PositionalEncodingFourier(dim=embed_dim)
        self.use_pos = use_pos

        # Classifier head
        trunc_normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal init for Linear weights, zeros for biases,
        # identity init for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        # NOTE(review): 'pos_embed' and 'dist_token' are not attributes of this
        # class; only 'cls_token' exists here. Harmless for optimizers that
        # just skip unknown names.
        return {'pos_embed', 'cls_token', 'dist_token'}

    def forward_features(self, x):
        B, C, H, W = x.shape
        # Patch embedding -> (B, num_patches, embed_dim), e.g. (B, 196, 384)
        # for 224x224 inputs with patch size 16 (196 = 14 * 14).
        # Hp = patches along height, Wp = patches along width.
        x, (Hp, Wp) = self.patch_embed(x)
        # Positional embedding: (B, dim, Hp, Wp) -> (B, Hp*Wp, dim).
        if self.use_pos:
            pos_encoding = self.pos_embeder(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
            x = x + pos_encoding
        x = self.pos_drop(x) # x: (B, num_patches, dim)
        # Leftovers from a multi-branch MSSA experiment (disabled below).
        x_raws = None
        x_0 = None
        # for bi in range(self.branches_num):
        #     for blk in self.branches[bi]['mssas']:
        #         # x_raw = blk(x, Hp, Wp)
        #         x_0 = blk(x, Hp, Wp)
        #         if x_raws is None:
        #             x_raws = x_0.to('cuda:0')
        #         else:
        #             torch.vstack((x_raws, x_0.to('cuda:0')))
        # # Pick the best branch output from x_raws to continue with
        # # (selection logic not implemented yet!)
        # branch_idx = 0
        # x = x_raws[branch_idx]
        # Leading sparse-attention (MSSA) blocks.
        for blk in self.mssas:
            x = blk(x, Hp, Wp)
        # Main trunk (AttentionTSSA by default).
        for blk in self.blocks:
            x = blk(x, Hp, Wp)
        # Prepend the CLS token: (B, N, dim) -> (B, 1 + N, dim).
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        for blk in self.cls_attn_blocks:
            x = blk(x, Hp, Wp)
        # Return the normalized CLS token only.
        x = self.norm(x)[:, 0]
        return x

    def forward(self, x):
        x = self.forward_features(x)
        x = self.head(x)
        if self.training:
            # Two identical outputs — presumably placeholders for a
            # (cls, distillation) head pair; confirm against the training loop.
            return x, x
        else:
            return x


# Patch size 16x16 models

# tost models
@register_model
def tost_cab_tiny_12_p16(pretrained=False, **kwargs):
    """ToST (TSSA attention): tiny width (192), depth 12, patch 16."""
    # Drop timm-injected kwargs that VisionTransformer does not accept.
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=16, embed_dim=192, depth=12, num_heads=4, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1.0, tokens_norm=True, attn_layer=AttentionTSSA, **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def tost_cab_small_12_p16(pretrained=False, **kwargs):
    """ToST (TSSA attention): small width (384), depth 12, patch 16."""
    # Drop timm-injected kwargs that VisionTransformer does not accept.
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=16, embed_dim=384, depth=12, num_heads=8, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1.0, tokens_norm=True, attn_layer=AttentionTSSA, **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def tost_cab_tiny_24_p16(pretrained=False, **kwargs):
    """ToST (TSSA attention): tiny width (192), depth 24, patch 16."""
    # Drop timm-injected kwargs that VisionTransformer does not accept.
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=16, embed_dim=192, depth=24, num_heads=4, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, attn_layer=AttentionTSSA, **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def tost_cab_small_24_p16(pretrained=False, **kwargs):
    """ToST (TSSA attention): small width (384), depth 24, patch 16."""
    # Drop timm-injected kwargs that VisionTransformer does not accept.
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=16, embed_dim=384, depth=24, num_heads=8, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, attn_layer=AttentionTSSA, **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def tost_cab_medium_24_p16(pretrained=False, **kwargs):
    """ToST (TSSA attention): medium width (512), depth 24, patch 16."""
    # Drop timm-injected kwargs that VisionTransformer does not accept
    # (previously only the tiny/small factories did this).
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=16, embed_dim=512, depth=24, num_heads=8, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, attn_layer=AttentionTSSA, **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def tost_cab_large_24_p16(pretrained=False, **kwargs):
    """ToST (TSSA attention): large width (768), depth 24, patch 16."""
    # Drop timm-injected kwargs that VisionTransformer does not accept
    # (previously only the tiny/small factories did this).
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=16, embed_dim=768, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, attn_layer=AttentionTSSA, **kwargs)
    model.default_cfg = _cfg()
    return model

# xcit models
@register_model
def xcit_cab_tiny_12_p16(pretrained=False, **kwargs):
    """XCiT (XCA attention): tiny width (192), depth 12, patch 16."""
    # Drop timm-injected kwargs that VisionTransformer does not accept
    # (consistent with the tost_* factories).
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=16, embed_dim=192, depth=12, num_heads=4, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1.0, tokens_norm=True, attn_layer=XCA, **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def xcit_cab_small_12_p16(pretrained=False, **kwargs):
    """XCiT (XCA attention): small width (384), depth 12, patch 16."""
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=16, embed_dim=384, depth=12, num_heads=8, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1.0, tokens_norm=True, attn_layer=XCA, **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def xcit_cab_tiny_24_p16(pretrained=False, **kwargs):
    """XCiT (XCA attention): tiny width (192), depth 24, patch 16."""
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=16, embed_dim=192, depth=24, num_heads=4, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, attn_layer=XCA, **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def xcit_cab_small_24_p16(pretrained=False, **kwargs):
    """XCiT (XCA attention): small width (384), depth 24, patch 16."""
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=16, embed_dim=384, depth=24, num_heads=8, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, attn_layer=XCA, **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def xcit_cab_medium_24_p16(pretrained=False, **kwargs):
    """XCiT (XCA attention): medium width (512), depth 24, patch 16."""
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=16, embed_dim=512, depth=24, num_heads=8, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, attn_layer=XCA, **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def xcit_cab_large_24_p16(pretrained=False, **kwargs):
    """XCiT (XCA attention): large width (768), depth 24, patch 16."""
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=16, embed_dim=768, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, attn_layer=XCA, **kwargs)
    model.default_cfg = _cfg()
    return model

# vit models
@register_model
def vit_cab_tiny_12_p16(pretrained=False, **kwargs):
    """ViT (standard attention): tiny width (192), depth 12, patch 16."""
    # Drop timm-injected kwargs that VisionTransformer does not accept
    # (consistent with the tost_* factories).
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=16, embed_dim=192, depth=12, num_heads=4, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1.0, tokens_norm=True, attn_layer=Attention, **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def vit_cab_small_12_p16(pretrained=False, **kwargs):
    """ViT (standard attention): small width (384), depth 12, patch 16."""
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=16, embed_dim=384, depth=12, num_heads=8, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1.0, tokens_norm=True, attn_layer=Attention, **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def vit_cab_tiny_24_p16(pretrained=False, **kwargs):
    """ViT (standard attention): tiny width (192), depth 24, patch 16."""
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=16, embed_dim=192, depth=24, num_heads=4, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, attn_layer=Attention, **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def vit_cab_small_24_p16(pretrained=False, **kwargs):
    """ViT (standard attention): small width (384), depth 24, patch 16."""
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=16, embed_dim=384, depth=24, num_heads=8, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, attn_layer=Attention, **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def vit_cab_medium_24_p16(pretrained=False, **kwargs):
    """ViT (standard attention): medium width (512), depth 24, patch 16."""
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=16, embed_dim=512, depth=24, num_heads=8, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, attn_layer=Attention, **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def vit_cab_large_24_p16(pretrained=False, **kwargs):
    """ViT (standard attention): large width (768), depth 24, patch 16."""
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=16, embed_dim=768, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, attn_layer=Attention, **kwargs)
    model.default_cfg = _cfg()
    return model
    
###############################################################################################################
# Patch size 8x8 models
@register_model
def tost_cab_tiny_12_p8(pretrained=False, **kwargs):
    """ToST (TSSA attention): tiny width (192), depth 12, patch 8."""
    # Drop timm-injected kwargs that VisionTransformer does not accept
    # (consistent with the p16 tost_* factories).
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=8, embed_dim=192, depth=12, num_heads=4, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1.0, tokens_norm=True, attn_layer=AttentionTSSA, **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def tost_cab_small_12_p8(pretrained=False, **kwargs):
    """ToST (TSSA attention): small width (384), depth 12, patch 8."""
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=8, embed_dim=384, depth=12, num_heads=8, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1.0, tokens_norm=True, attn_layer=AttentionTSSA, **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def tost_cab_tiny_24_p8(pretrained=False, **kwargs):
    """ToST (TSSA attention): tiny width (192), depth 24, patch 8."""
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=8, embed_dim=192, depth=24, num_heads=4, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, attn_layer=AttentionTSSA, **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def tost_cab_small_24_p8(pretrained=False, **kwargs):
    """ToST (TSSA attention): small width (384), depth 24, patch 8."""
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=8, embed_dim=384, depth=24, num_heads=8, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, attn_layer=AttentionTSSA, **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def tost_cab_medium_24_p8(pretrained=False, **kwargs):
    """ToST (TSSA attention): medium width (512), depth 24, patch 8."""
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=8, embed_dim=512, depth=24, num_heads=8, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, attn_layer=AttentionTSSA, **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def tost_cab_large_24_p8(pretrained=False, **kwargs):
    """ToST (TSSA attention): large width (768), depth 24, patch 8."""
    for _k in ('pretrained_cfg', 'pretrained_cfg_overlay', 'cache_dir'):
        kwargs.pop(_k, None)
    model = VisionTransformer(
        patch_size=8, embed_dim=768, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), eta=1e-5, tokens_norm=True, attn_layer=AttentionTSSA, **kwargs)
    model.default_cfg = _cfg()
    return model
