import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from timm.models.layers import DropPath, to_2tuple, trunc_normal_

class Mlp(nn.Module):
    """Two-layer feed-forward block: Linear -> activation -> dropout -> Linear -> dropout.

    Hidden and output widths fall back to ``in_features`` when not supplied.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features

        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.drop1 = nn.Dropout(drop)
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop2 = nn.Dropout(drop)

    def forward(self, x):
        """Expand, apply the non-linearity, project back; dropout after each linear."""
        hidden = self.drop1(self.act(self.fc1(x)))
        return self.drop2(self.fc2(hidden))

def window_partition(x, window_size):
    """Split a feature map into non-overlapping square windows.

    Args:
        x: tensor of shape (B, H, W, C); H and W must be divisible by window_size.
        window_size (int): side length of each window.

    Returns:
        windows: tensor of shape (num_windows*B, window_size, window_size, C).
    """
    B, H, W, C = x.shape
    n_h, n_w = H // window_size, W // window_size
    # Carve the spatial grid into an (n_h, n_w) arrangement of window_size x
    # window_size tiles, move the two tile indices next to the batch axis,
    # then fold them into it.
    tiles = x.view(B, n_h, window_size, n_w, window_size, C)
    tiles = tiles.permute(0, 1, 3, 2, 4, 5).contiguous()
    return tiles.view(-1, window_size, window_size, C)

def window_reverse(windows, window_size, H, W):
    """Merge windows produced by ``window_partition`` back into a feature map.

    Args:
        windows: tensor of shape (num_windows*B, window_size, window_size, C).
        window_size (int): side length of each window.
        H (int): height of the (padded) feature map.
        W (int): width of the (padded) feature map.

    Returns:
        x: tensor of shape (B, H, W, C).
    """
    n_h, n_w = H // window_size, W // window_size
    # Recover the batch size from the total number of windows.
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, n_h, n_w, window_size, window_size, -1)
    # Interleave the tile indices with the intra-tile offsets to restore
    # the original (H, W) ordering.
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous()
    return x.view(B, H, W, -1)

class WindowAttention(nn.Module):
    r""" Window based multi-head self attention (W-MSA) module with relative position bias,
    in the Swin Transformer V2 style: scaled cosine attention with a learnable per-head
    logit scale, and a continuous relative position bias produced by a small MLP over
    log-spaced relative coordinates.
    It supports both of shifted and non-shifted window.

    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query and value
            (the key carries a fixed zero bias, matching the official Swin V2 code).
            Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5
            if set. NOTE: unused by the cosine-attention path below; kept only for
            interface compatibility.
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):

        super().__init__()
        self.dim = dim
        self.window_size = window_size  # (Mh, Mw)
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Retained for compatibility; forward() uses logit_scale instead (see docstring).
        self.scale = qk_scale or head_dim ** -0.5

        # Learnable per-head attention temperature, stored in log space (init: log 10).
        self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))), requires_grad=True)

        # mlp to generate continuous relative position bias
        self.cpb_mlp = nn.Sequential(nn.Linear(2, 512, bias=True),
                                     nn.ReLU(inplace=True),
                                     nn.Linear(512, num_heads, bias=False))

        # get relative_coords_table: every possible (dh, dw) offset inside a window
        relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.float32)
        relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.float32)
        relative_coords_table = torch.stack(
            torch.meshgrid([relative_coords_h,
                            relative_coords_w])).permute(1, 2, 0).contiguous().unsqueeze(0)  # 1, 2*Wh-1, 2*Ww-1, 2

        # Normalize offsets to [-8, 8], then map through sign(x) * log2(1 + |x|) / log2(8)
        # so the bias MLP sees log-spaced coordinates in [-1, 1] (Swin V2).
        relative_coords_table[:, :, :, 0] /= (self.window_size[0] - 1)
        relative_coords_table[:, :, :, 1] /= (self.window_size[1] - 1)
        relative_coords_table *= 8  # normalize to -8, 8
        relative_coords_table = torch.sign(relative_coords_table) * torch.log2(
            torch.abs(relative_coords_table) + 1.0) / np.log2(8)

        self.register_buffer("relative_coords_table", relative_coords_table)

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])    # Mh
        coords_w = torch.arange(self.window_size[1])    # Mw
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Mh, Mw
        coords_flatten = torch.flatten(coords, 1)  # 2, Mh*Mw
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Mh*Mw, Mh*Mw
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Mh*Mw, Mh*Mw, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Mh*Mw, Mh*Mw
        self.register_buffer("relative_position_index", relative_position_index)

        self.qkv = nn.Linear(dim, dim * 3, bias=False)
        if qkv_bias:
            # Separate q/v biases; k deliberately gets a zero, non-trainable bias
            # assembled in forward().
            self.q_bias = nn.Parameter(torch.zeros(dim))
            self.v_bias = nn.Parameter(torch.zeros(dim))
        else:
            self.q_bias = None
            self.v_bias = None
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None

        Returns:
            Tensor of shape (num_windows*B, N, C).
        """
        # batch_size*num_windows, Mh*Mw, total_embed_dim
        B_, N, C = x.shape
        qkv_bias = None
        if self.q_bias is not None:
            qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
        qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
        qkv = qkv.reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        # Cosine attention: similarity of L2-normalized q and k, scaled by a learnable
        # temperature clamped so exp(logit_scale) never exceeds 1/0.01 = 100.
        # BUGFIX: the original read `.requires_grad` (a bool, False == 0) off the max
        # tensor, clamping the log-scale at 0 and thus capping the scale at exp(0) = 1.
        attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1))
        logit_scale = torch.clamp(self.logit_scale, max=math.log(1. / 0.01)).exp()
        attn = attn * logit_scale

        relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads)
        relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        relative_position_bias = 16 * torch.sigmoid(relative_position_bias)
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            # mask: [nW, Mh*Mw, Mh*Mw]; broadcast over batch and heads.
            nW = mask.shape[0]
            # attn.view: [batch_size, num_windows, num_heads, Mh*Mw, Mh*Mw]
            # mask.unsqueeze: [1, nW, 1, Mh*Mw, Mh*Mw]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)

        attn = self.softmax(attn)
        attn = self.attn_drop(attn)

        # [B_, nH, N, d] -> transpose -> [B_, N, nH, d] -> reshape -> [B_, N, C]
        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

class WindowTransformerBlock(nn.Module):
    r""" Swin Transformer Block.

    Applies (shifted-)window multi-head self attention followed by an MLP, each
    with a residual connection.  Note the residual wiring in ``forward``:
    LayerNorm is applied to the sub-module *output* before the residual add
    (Swin V2 "res-post-norm" ordering).

    NOTE: the caller must set ``self.H`` and ``self.W`` (spatial size of the
    flattened input) before calling ``forward``.

    Args:
        dim (int): Number of input channels.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer.  Default: nn.LayerNorm
    """

    def __init__(self, dim, num_heads, window_size = 7, shift_size = 0,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio

        assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"

        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
            qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim,hidden_features=mlp_hidden_dim,act_layer=act_layer, drop=drop)

    def forward(self, x, attn_mask):
        """
        Args:
            x: (B, H*W, C) flattened feature map; H and W are read from
                ``self.H`` / ``self.W`` (set by the caller).
            attn_mask: (num_windows, Mh*Mw, Mh*Mw) mask for SW-MSA; discarded
                (set to None) when this block uses non-shifted windows.

        Returns:
            Tensor of shape (B, H*W, C).
        """
        H, W = self.H, self.W
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"

        shortcut = x
        x = x.view(B, H, W, C)

        # pad feature map (bottom/right only) to integer multiples of window size
        pad_l = pad_t = 0
        pad_r = (self.window_size - W % self.window_size) % self.window_size
        pad_b = (self.window_size - H % self.window_size) % self.window_size
        x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
        _, Hp, Wp, _ = x.shape


        # cyclic shift (SW-MSA); non-shifted blocks drop the mask entirely
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x
            attn_mask = None

        # partition windows
        x_windows = window_partition(shifted_x, self.window_size) # [nW*B, Mh, Mw, C]
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # nW*B, Mh*Mw, C

        # W-MSA/SW-MSA
        attn_windows = self.attn(x_windows, mask = attn_mask)  # nW*B, Mh*Mw, C

        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) # nW*B, Mh, Mw, C
        shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # [B, H', W', C]


        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x

        if pad_r > 0 or pad_b > 0:
            # strip the bottom/right padding added above
            x = x[:, :H, :W, :].contiguous()

        x = x.view(B, H * W, C)

        # FFN — residual connections in res-post-norm form: the norm is applied
        # to the attention / MLP output before it is added to the shortcut
        x = shortcut + self.drop_path(self.norm1(x))
        x = x + self.drop_path(self.norm2(self.mlp(x)))

        return x

    # def extra_repr(self) -> str:
    #     return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
    #            f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
    #
    # def flops(self):
    #     flops = 0
    #     H, W = self.input_resolution
    #     # norm1
    #     flops += self.dim * H * W
    #     # W-MSA/SW-MSA
    #     nW = H * W / self.window_size / self.window_size
    #     flops += nW * self.attn.flops(self.window_size * self.window_size)
    #     # mlp
    #     flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
    #     # norm2
    #     flops += self.dim * H * W
    #     return flops

class PatchMerging(nn.Module):
    r""" Patch Merging layer: 2x spatial downsampling that doubles the channels.

    The four pixels of every 2x2 neighbourhood are concatenated along the
    channel axis (4*C), normalized, then linearly projected down to 2*C.

    Args:
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer.  Default: nn.LayerNorm
    """

    def __init__(self, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def forward(self, x, H, W):
        """x: (B, H*W, C) -> (B, ceil(H/2)*ceil(W/2), 2*C)."""
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"

        x = x.view(B, H, W, C)

        # Pad one row/column at the bottom/right when H or W is odd so that the
        # 2x2 grouping below always works.  Tensor layout is (B, H, W, C), hence
        # the pad tuple order (C_front, C_back, W_left, W_right, H_top, H_bottom).
        if H % 2 or W % 2:
            x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))

        # Gather the four interleaved sub-grids of each 2x2 block, in the same
        # (0,0), (1,0), (0,1), (1,1) order as the reference implementation.
        corners = [x[:, i::2, j::2, :] for j in (0, 1) for i in (0, 1)]
        merged = torch.cat(corners, dim=-1)          # B H/2 W/2 4*C
        merged = merged.view(B, -1, 4 * C)           # B H/2*W/2 4*C

        merged = self.norm(merged)
        # B H/2*W/2 4*C -> B H/2*W/2 2*C
        return self.reduction(merged)

class BasicLayer(nn.Module):
    """ A basic Swin Transformer layer for one stage.

    Runs `depth` WindowTransformerBlocks at one resolution — even-indexed
    blocks use regular windows (shift 0), odd-indexed blocks use shifted
    windows — then optionally downsamples with `downsample`.

    Args:
        dim (int): Number of input channels.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
    """

    def __init__(self, dim, depth, num_heads, window_size, mlp_ratio=4., qkv_bias=True,
                 qk_scale=None, drop=0., attn_drop=0., drop_path=0., norm_layer=nn.LayerNorm,
                 downsample=None):

        super().__init__()
        self.dim = dim
        self.depth = depth
        self.window_size = window_size
        self.shift_size = window_size // 2      # SW-MSA shift (down and right): half the window size

        # build blocks: alternate W-MSA (shift 0) and SW-MSA (shift window//2)
        self.blocks = nn.ModuleList([
            WindowTransformerBlock(dim=dim, num_heads=num_heads,
                                 window_size = window_size,
                                 shift_size = 0 if (i % 2 == 0) else self.shift_size,
                                 mlp_ratio=mlp_ratio,
                                 qkv_bias=qkv_bias, qk_scale=qk_scale,
                                 drop=drop, attn_drop=attn_drop,
                                 drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                                 norm_layer=norm_layer)
            for i in range(depth)])

        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(dim=dim, norm_layer=norm_layer)
        else:
            self.downsample = None

    def create_mask(self, x, H, W):
        """Build the (num_windows, Mh*Mw, Mh*Mw) attention mask for SW-MSA.

        Entries are 0 where two tokens belong to the same (pre-shift) region and
        -100 where they do not, so cross-region attention is suppressed after
        softmax.  Only the shifted blocks consume this mask; the negative slice
        bounds below assume shift_size > 0 (i.e. window_size >= 2).
        """
        # calculate attention mask for SW-MSA
        # round H and W up to integer multiples of window_size (matches the
        # padding applied inside WindowTransformerBlock.forward)
        Hp = int(np.ceil(H / self.window_size)) * self.window_size
        Wp = int(np.ceil(W / self.window_size)) * self.window_size
        # laid out like the feature map (B, H, W, C) so window_partition applies
        img_mask = torch.zeros((1, Hp, Wp, 1), device= x.device) # [1, Hp, Wp, 1]
        h_slices = (slice(0, -self.window_size),
                    slice(-self.window_size, -self.shift_size),
                    slice(-self.shift_size, None))
        w_slices = (slice(0, -self.window_size),
                    slice(-self.window_size, -self.shift_size),
                    slice(-self.shift_size, None))
        # label each of the 3x3 regions produced by the cyclic shift with a
        # distinct integer id
        cnt = 0
        for h in h_slices:
            for w in w_slices:
                img_mask[:, h, w, :] = cnt
                cnt += 1

        mask_windows = window_partition(img_mask, self.window_size)  # [nW, Mh, Mw, 1]
        mask_windows = mask_windows.view(-1, self.window_size * self.window_size)  # [nW, Mh*Mw]
        # pairwise id difference: zero iff two tokens came from the same region
        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)  # [nW, 1, Mh*Mw] - [nW, Mh*Mw, 1]
        # [nW, Mh*Mw, Mh*Mw]: 0 for same-region pairs, -100 otherwise
        attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        return attn_mask

    def forward(self, x, H, W):
        """x: (B, H*W, C).  Returns ([x, x_down], H', W') when this stage
        downsamples, otherwise (x, H, W)."""
        attn_mask = self.create_mask(x, H, W) # [nW, Mh*Mw, Mh*Mw]; only the shifted blocks use it
        for blk in self.blocks:
            # blocks read the spatial size from these attributes
            blk.H, blk.W = H, W
            x = blk(x, attn_mask)
        if self.downsample is not None:
            x_down = self.downsample(x, H, W)       # downsampled 2-D token embedding
            H, W = (H + 1) // 2, (W + 1) // 2       # round up to handle odd sizes
            return [x, x_down], H, W
        else:
            return x, H, W

    # def extra_repr(self) -> str:
    #     return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
    #
    # def flops(self):
    #     flops = 0
    #     for blk in self.blocks:
    #         flops += blk.flops()
    #     if self.downsample is not None:
    #         flops += self.downsample.flops()
    #     return flops

class PatchEmbed(nn.Module):
    r""" Image to Patch Embedding: downsample a feature map and flatten it to tokens.

    Each downsampling step halves the spatial resolution and doubles the channel
    count using the element-wise sum of two parallel branches: a MaxPool + 1x1
    conv branch and a strided 3x3 conv branch.

    Args:
        scale_ratio (int): requested downsampling factor. Default: 2.
        in_chans (int): Number of input image channels. Default: 256.
        norm_layer (nn.Module, optional): Normalization layer applied to the
            flattened tokens. Default: None (identity).
    """

    def __init__(self, scale_ratio=2, in_chans=256, norm_layer=None):
        super().__init__()
        self.scale_ratio = scale_ratio
        self.in_chans = in_chans

        # Number of halving steps.  NOTE(review): scale_ratio // 2 equals
        # log2(scale_ratio) only for scale_ratio in {2, 4}; for larger ratios the
        # realised factor is 2 ** (scale_ratio // 2) — confirm intended usage.
        steps = int(scale_ratio // 2)

        # Parallel downsampling branches; their outputs are summed in forward().
        self.maxp = nn.ModuleList()   # MaxPool + pointwise-conv branch
        self.conv = nn.ModuleList()   # strided-conv branch

        for i in range(steps):
            ch_in = in_chans * (2 ** i)
            ch_out = in_chans * (2 ** (i + 1))
            # Each step halves H and W and doubles the channel count.
            self.maxp.append(nn.Sequential(
                nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
                nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=1),
                nn.BatchNorm2d(ch_out),
                nn.ReLU(inplace=True),
            ))
            self.conv.append(nn.Sequential(
                nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=2, padding=1),
                nn.BatchNorm2d(ch_out),
                nn.ReLU(inplace=True)
            ))

        # Channel count of the produced embedding: in_chans * 2**steps.
        self.embed_dim = in_chans * (2 ** steps)
        self.norm = norm_layer(self.embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        """x: (B, C, H, W) -> (tokens of shape (B, H'*W', embed_dim), H', W')."""
        _, _, H, W = x.shape

        # Pad the right/bottom edges so H and W become multiples of scale_ratio.
        # Pad tuple covers the last 3 dims: (W_left, W_right, H_top, H_bottom, C_front, C_back).
        if H % self.scale_ratio or W % self.scale_ratio:
            x = F.pad(x, (0, self.scale_ratio - W % self.scale_ratio,
                          0, self.scale_ratio - H % self.scale_ratio,
                          0, 0))

        # Apply every downsampling stage; the two branch outputs are summed.
        for pool_branch, conv_branch in zip(self.maxp, self.conv):
            x = pool_branch(x) + conv_branch(x)

        _, _, H, W = x.shape
        tokens = x.flatten(2).transpose(1, 2)  # B Ph*Pw C
        return self.norm(tokens), H, W

class PositionEmbeddingSine(nn.Module):
    """Sinusoidal 1-D position embedding over token sequences.

    A standard sine/cosine embedding in the style of "Attention Is All You
    Need", computed along the sequence dimension of a (B, N, C) input.

    Args:
        temperature (float): base of the per-channel frequency progression.
            Default: 10000.
        normalize (bool): if True, rescale positions to [0, scale]. Default: False.
        scale (float | None): position range used when ``normalize`` is True.
            Default: 2 * pi.

    Raises:
        ValueError: if ``scale`` is given while ``normalize`` is False.
    """

    def __init__(self, temperature=10000, normalize=False, scale=None):
        super().__init__()
        # Fixed attribute-name typos from the original ("temperatures",
        # "normalzie"); nothing else in this file reads these attributes.
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale

    def forward(self, x):
        """x: (B, N, C) -> positional encoding of shape (B, N, C)."""
        # Positions 1..N along the sequence axis (cumulative sum of ones).
        not_mask = torch.ones(x.shape[0], x.shape[1]).to(x.device)
        embed = not_mask.cumsum(1, dtype=torch.float32)
        if self.normalize:
            eps = 1e-6
            embed = embed / (embed[:, -1:] + eps) * self.scale

        # Frequency divisor per channel i: temperature ** (2 * (i // 2) / C).
        dim_t = torch.arange(x.shape[2], dtype=torch.float32, device=x.device)
        dim_t = self.temperature ** (2 * (dim_t // 2) / x.shape[2])

        # (B, N, C): each position divided by its channel frequency.
        pos = embed[:, :, None] / dim_t

        # Interleave sin on even channels with cos on odd channels.
        pos = torch.stack((pos[:, :, 0::2].sin(), pos[:, :, 1::2].cos()), dim=3).flatten(2)
        return pos

# The input feature map size should ideally be a multiple of window_size at every scale.
# Each scale's channel count must be divisible by the number of attention heads.
class WindowTransformerV2(nn.Module):
    r""" Swin Transformer
        A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows`  -
          https://arxiv.org/pdf/2103.14030

    Consumes a (B, C, H, W) feature map and returns one (B, C_i, H_i, W_i)
    feature map per stage (finest resolution first); see ``forward_features``.

    Args:
        scale_ratio (int | tuple(int)): downsampling factor of the patch embedding. Default: 2
        in_chans (int): Number of input image channels. Default: 256
        depths (tuple(int)): Depth of each Swin Transformer layer (blocks per stage).
        num_heads (tuple(int)): Number of attention heads in different layers.
        window_size (int): Window size. Default: 7
        mlp_ratio (tuple(float)): Ratio of mlp hidden dim to embedding dim per stage. Default: 4
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
        drop_rate (float): Dropout rate, applied after the patch embedding. Default: 0
        attn_drop_rate (float): Attention dropout rate inside the encoder. Default: 0
        drop_path_rate (float): Stochastic depth rate inside the encoder. Default: 0.1
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        patch_norm (bool): If True, add normalization after patch embedding. Default: True
    """

    def __init__(self, scale_ratio=2, in_chans=256, depths=[2, 6, 2],
                 num_heads=[4, 8, 16], mlp_ratio=[2., 2., 2.],
                 window_size=7, qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0.1, norm_layer=nn.LayerNorm, patch_norm=True,
                **kwargs):
        super().__init__()

        self.num_layers = len(depths)             # number of stages
        time = int(scale_ratio // 2)              # downsampling steps performed by the patch embed
        self.embed_dim = in_chans * (2 ** time)   # channel count of the first stage
        self.patch_norm = patch_norm
        # channel count of the last stage
        self.num_features = int(self.embed_dim * 2 ** (self.num_layers - 1))
        self.mlp_ratio = mlp_ratio

        # split image into non-overlapping patches
        # (each downsampling step doubles the channel count)
        self.patch_embed = PatchEmbed(
            scale_ratio=scale_ratio, in_chans=in_chans,
            norm_layer=norm_layer if self.patch_norm else None)

        self.pos_drop = nn.Dropout(p=drop_rate)

        # stochastic depth decay rule: drop-path probability grows linearly
        # from 0 to drop_path_rate over all blocks
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]

        # build stages; every stage except the last ends with a PatchMerging downsample
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = BasicLayer(dim=int(self.embed_dim * 2 ** i_layer),
                               depth=depths[i_layer],
                               num_heads=num_heads[i_layer],
                               window_size = window_size,
                               mlp_ratio=self.mlp_ratio[i_layer],
                               qkv_bias=qkv_bias, qk_scale=qk_scale,
                               drop=drop_rate, attn_drop=attn_drop_rate,
                               drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
                               norm_layer=norm_layer,
                               downsample=PatchMerging if (i_layer < self.num_layers - 1) else None)
            self.layers.append(layer)

        self.norm = norm_layer(self.num_features)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal init for Linear weights, zero biases, and the
        # standard (weight=1, bias=0) affine init for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @staticmethod
    def _to_spatial(x, H, W):
        """Reshape flattened tokens (B, H*W, C) to a (B, C, H, W) feature map."""
        B, _, C = x.shape
        return x.view(B, H, W, C).permute(0, 3, 1, 2).contiguous()

    def forward_features(self, x):
        """Run the backbone; returns the list of per-stage feature maps."""
        # (B, C, H, W) -> tokens (B, H*W, C) at the first-stage resolution
        x, H, W = self.patch_embed(x)
        x = self.pos_drop(x)

        res = []                                    # per-stage output feature maps
        for i, layer in enumerate(self.layers):
            if i < self.num_layers - 1:             # non-final stage
                # tmp = [this stage's output, downsampled tokens for the next stage]
                tmp, H_new, W_new = layer(x, H, W)
                # BUGFIX: the original called x_tmp.view(B, C, H, W) directly on a
                # (B, H*W, C) tensor, which reinterprets memory and scrambles the
                # feature map; a permute back to channels-first is required.
                res.append(self._to_spatial(tmp[0], H, W))
                x, H, W = tmp[1], H_new, W_new
            else:                                   # final stage
                x, H, W = layer(x, H, W)
                x = self.norm(x)
                res.append(self._to_spatial(x, H, W))

        return res

    def forward(self, x):
        return self.forward_features(x)