import torch
import torch.nn as nn
import numpy as np


def window_partition(x, window_size):
    """Split a feature map into non-overlapping square windows.

    Args:
        x: tensor of shape [B, H, W, C]; H and W must be divisible by window_size.
        window_size: side length of each window.

    Returns:
        Tensor of shape [B * num_windows, window_size, window_size, C].
    """
    batch, height, width, channels = x.shape
    rows = height // window_size
    cols = width // window_size
    # [B, rows, ws, cols, ws, C] -> [B, rows, cols, ws, ws, C]
    tiles = x.reshape(batch, rows, window_size, cols, window_size, channels)
    tiles = tiles.permute(0, 1, 3, 2, 4, 5)
    return tiles.reshape(-1, window_size, window_size, channels)


def window_reverse(windows, window_size, H, W):
    """Reassemble windows produced by window_partition into a feature map.

    Args:
        windows: tensor of shape [B * num_windows, window_size, window_size, C].
        window_size: side length of each window.
        H, W: spatial size of the target feature map.

    Returns:
        Tensor of shape [B, H, W, C].
    """
    windows_per_image = int(H / window_size * W / window_size)
    batch = windows.shape[0] // windows_per_image
    # [B, rows, cols, ws, ws, C] -> [B, rows, ws, cols, ws, C]
    grid = windows.reshape(batch, H // window_size, W // window_size,
                           window_size, window_size, -1)
    grid = grid.permute(0, 1, 3, 2, 4, 5)
    return grid.reshape(batch, H, W, -1)


# Positions that must not attend to each other get a large negative value (-100), which becomes 0 after softmax.
def generate_mask(input_res, window_size, shift_size):
    """Build the additive attention mask for shifted-window self-attention.

    After the cyclic shift, some windows contain pixels from disjoint image
    regions; pairs of positions from different regions get -100 (vanishing
    attention after softmax), pairs from the same region get 0.

    Args:
        input_res: (H, W) resolution of the feature map.
        window_size: side length of each attention window.
        shift_size: cyclic-shift offset used by the shifted windows.

    Returns:
        Tensor of shape [num_windows, window_size**2, window_size**2].
    """
    H, W, = input_res

    # Round H and W up to multiples of window_size (ceil).
    Hp = int(np.ceil(H / window_size)) * window_size
    Wp = int(np.ceil(W / window_size)) * window_size

    image_mask = torch.zeros((1, Hp, Wp, 1))

    # The three bands produced by the cyclic shift, identical along each axis.
    bands = (slice(0, -window_size),
             slice(-window_size, -shift_size),
             slice(-shift_size, None))

    # Label every pixel with the id of the region it originated from.
    region_id = 0
    for hs in bands:
        for ws in bands:
            image_mask[:, hs, ws, :] = region_id
            region_id += 1

    # Cut the labelled map into windows: [num_windows, ws, ws, 1],
    # then flatten each window to a row vector: [num_windows, ws*ws].
    mask_window = window_partition(image_mask, window_size)
    mask_window = mask_window.reshape(-1, window_size * window_size)

    # Broadcast subtraction [nW, 1, ws*ws] - [nW, ws*ws, 1] gives, for every
    # window, the pairwise region differences: 0 iff two positions share a
    # region, non-zero otherwise.
    attn_mask = mask_window.unsqueeze(1) - mask_window.unsqueeze(2)

    # Map: non-zero (different regions) -> -100, zero (same region) -> 0.
    attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0))
    attn_mask = attn_mask.masked_fill(attn_mask == 0, float(0.0))
    return attn_mask


'''
    Cut the image into patch_size x patch_size patches and embed each one.
'''
class Patch_Embeding(nn.Module):
    def __init__(self, dim=96, patch_size=4):
        super().__init__()
        # A strided conv both tiles the image into (patch_size x patch_size)
        # patches and projects each patch from 3 channels to `dim`, as in ViT.
        self.patch = nn.Conv2d(3, dim, kernel_size=patch_size, stride=patch_size)
        self.norm = nn.LayerNorm(dim)

    def forward(self, x):
        """[B, 3, H, W] -> [B, num_patches, dim] normalized patch tokens."""
        feat = self.patch(x)                      # [B, dim, H/ps, W/ps]
        tokens = feat.flatten(2).transpose(1, 2)  # [B, num_patches, dim]
        return self.norm(tokens)


class Patch_Merging(nn.Module):
    """Downsample the token grid 2x per spatial dim while doubling channels.

    Each 2x2 neighbourhood is concatenated along channels (4*dim), projected
    to 2*dim, then layer-normalized.
    """

    def __init__(self, input_res, dim):
        super().__init__()
        self.resolution = input_res  # (H, W) of the incoming token grid
        self.dim = dim

        self.reduction = nn.Linear(4 * dim, 2 * dim)
        self.norm = nn.LayerNorm(2 * dim)

    def forward(self, x):
        """[B, H*W, C] -> [B, (H*W)/4, 2C]."""
        H, W = self.resolution
        B, _, C = x.shape

        grid = x.reshape(B, H, W, C)

        # Gather the four corners of every 2x2 block (Focus-style slicing);
        # order matches (0,0), (0,1), (1,0), (1,1).
        corners = [grid[:, i::2, j::2, :] for i in (0, 1) for j in (0, 1)]
        merged = torch.cat(corners, -1)          # [B, H/2, W/2, 4C]

        merged = merged.reshape(B, -1, 4 * C)
        merged = self.reduction(merged)          # project 4C -> 2C
        return self.norm(merged)


class window_attention(nn.Module):
    """Multi-head self-attention inside a window, with relative position bias.

    Input is a batch of flattened windows: [B*num_windows, ws*ws, dim].
    The relative position bias follows the Swin Transformer scheme: one
    learnable value per (dy, dx) offset per head, gathered via a precomputed
    index table.
    """

    def __init__(self, dim, window_size, num_heads=8, qkv_bias=False):
        super().__init__()

        self.num_heads = num_heads
        prehead_dim = dim // self.num_heads
        self.scale = prehead_dim ** -0.5  # 1/sqrt(d_head) attention scaling
        self.window_size = window_size
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.proj = nn.Linear(dim, dim)

        # Learnable relative position bias table:
        # one entry per possible (dy, dx) offset, per head.
        # Reference (Chinese write-up kept from the original author):
        # https://blog.csdn.net/qq_37541097/article/details/121119988
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * self.window_size - 1) * (2 * self.window_size - 1), num_heads)
        )
        coords_h = torch.arange(self.window_size)
        coords_w = torch.arange(self.window_size)
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # [2, Wh, Ww]
        coords_flatten = torch.flatten(coords, 1)  # [2, Wh*Ww]
        # Pairwise (dy, dx) offsets between all positions within a window.
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # [2, Wh*Ww, Wh*Ww]

        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # [Wh*Ww, Wh*Ww, 2]
        relative_coords[:, :, 0] += self.window_size - 1  # shift offsets to start from 0
        relative_coords[:, :, 1] += self.window_size - 1
        relative_coords[:, :, 0] *= 2 * self.window_size - 1  # row-major flattening

        # FIX: register as a (non-persistent) buffer so the index tensor follows
        # the module across devices; previously it was a plain attribute that
        # stayed on CPU after .cuda()/.to(). persistent=False keeps state_dict
        # identical to the original.
        self.register_buffer("relative_position_index",
                             relative_coords.sum(-1), persistent=False)  # [Wh*Ww, Wh*Ww]

    def forward(self, x, mask=None):
        """
        Args:
            x: [B*num_windows, num_patches, dim] flattened window tokens.
            mask: optional [num_windows, num_patches, num_patches] additive
                mask (from generate_mask) for shifted windows.

        Returns:
            [B*num_windows, num_patches, dim]
        """
        B, num_patches, total_dim = x.shape

        qkv = self.qkv(x)  # [B*num_windows, num_patches, 3*dim]
        qkv = qkv.reshape(B, num_patches, 3, self.num_heads,
                          total_dim // self.num_heads)  # [B*nw, P, 3, heads, d_head]
        qkv = qkv.permute(2, 0, 3, 1, 4)  # [3, B*nw, heads, P, d_head]
        q, k, v = qkv[0], qkv[1], qkv[2]  # each [B*nw, heads, P, d_head]

        atten = (q @ k.transpose(-2, -1)) * self.scale  # [B*nw, heads, P, P]

        # Gather the relative position bias: [P, P, heads] -> [heads, P, P].
        relative_position_bias = self.relative_position_bias_table[
            self.relative_position_index.view(-1)].view(
            self.window_size * self.window_size, self.window_size * self.window_size, -1
        )
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
        # BUG FIX: the original assigned the biased scores to an unused
        # variable (`attn`) and then kept using `atten`, silently discarding
        # the bias. It must feed into the softmax.
        atten = atten + relative_position_bias.unsqueeze(0)

        if mask is None:
            atten = atten.softmax(dim=-1)
        else:
            # mask: [num_windows, P, P]; broadcast over batch and heads:
            # atten -> [B, num_windows, heads, P, P], mask -> [1, nW, 1, P, P].
            atten = atten.reshape(B // mask.shape[0], mask.shape[0], self.num_heads,
                                  mask.shape[1], mask.shape[1])
            # FIX: move the mask to wherever the activations live instead of
            # unconditionally calling .cuda() (broke pure-CPU runs mixing devices).
            mask = mask.to(atten.device)
            atten = atten + mask.unsqueeze(1).unsqueeze(0)

            atten = atten.reshape(-1, self.num_heads, mask.shape[1],
                                  mask.shape[1])  # [B*nw, heads, P, P]
            atten = atten.softmax(dim=-1)

        atten = atten @ v  # [B*nw, heads, P, d_head]

        atten = atten.transpose(1, 2)  # [B*nw, P, heads, d_head]
        atten = atten.reshape(B, num_patches, total_dim)  # [B*nw, P, dim]

        out = self.proj(atten)

        return out


class MLP(nn.Module):
    """Two-layer feed-forward block: in_dim -> in_dim*ratio -> in_dim.

    NOTE(review): the activation is applied after fc2 as well, which deviates
    from the usual transformer MLP (no activation after the second linear) —
    confirm this is intentional before changing it.
    """

    def __init__(self, in_dim, mlp_ratio=4):
        super().__init__()
        self.fc1 = nn.Linear(in_dim, in_dim * mlp_ratio)
        # self.actlayer = nn.GELU()
        # The original author swapped GELU for ReLU.
        self.actlayer = nn.ReLU()
        self.fc2 = nn.Linear(mlp_ratio * in_dim, in_dim)

    def forward(self, x):
        hidden = self.actlayer(self.fc1(x))    # [B, N, in_dim*ratio]
        return self.actlayer(self.fc2(hidden)) # [B, N, in_dim]


# swin_encode & Patch_Merging
class Swin_Block(nn.Module):
    """One Swin transformer block: (shifted-)window attention + MLP, each with
    a pre-LayerNorm and a residual connection.

    Args:
        dim: channel dimension of the tokens.
        num_heads: number of attention heads.
        input_res: (H, W) of the current feature map.
        window_size: attention window side length.
        qkv_bias: whether the qkv projection carries a bias.
        shift_size: cyclic-shift offset (0 for W-MSA, window_size//2 for SW-MSA).
    """

    def __init__(self, dim, num_heads, input_res, window_size, qkv_bias=False, shift_size=0):
        super().__init__()

        self.dim = dim  # input channel dimension C
        self.resolution = input_res  # feature-map (H, W) at this stage
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.atten_norm = nn.LayerNorm(dim)
        self.atten = window_attention(dim, window_size, num_heads, qkv_bias)
        self.mlp_norm = nn.LayerNorm(dim)
        self.mlp = MLP(dim, mlp_ratio=4)

    def forward(self, x):
        # x: [B, num_patches, dim]
        # resolution shrinks per stage: [56,56] -> [28,28] -> [14,14] -> [7,7]
        H, W = self.resolution
        B, N, C = x.shape

        assert N == H * W

        h = x
        x = self.atten_norm(x)

        # Back to spatial layout so windows can be shifted and partitioned.
        x = x.reshape(B, H, W, C)

        if self.shift_size > 0:
            # Cyclic shift: x[0, 0, 0, 0] == shift_x[0, -s, -s, 0].
            shift_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))

            atten_mask = generate_mask(input_res=self.resolution, window_size=self.window_size,
                                       shift_size=self.shift_size)
        else:
            shift_x = x
            atten_mask = None

        # Partition into windows and run self-attention per window.
        x_window = window_partition(shift_x, self.window_size)  # [B*nw, ws, ws, C]
        x_window = x_window.reshape(-1, self.window_size * self.window_size, C)

        atten_window = self.atten(x_window, mask=atten_mask)  # [B*nw, ws*ws, C]
        atten_window = atten_window.reshape(-1, self.window_size, self.window_size, C)

        # Merge the windows back into a full feature map.
        shifted_out = window_reverse(atten_window, self.window_size, H, W)  # [B, H, W, C]

        # BUG FIX: undo the cyclic shift — the original never rolled back, so
        # every shifted block left the features rotated by -shift_size in both
        # spatial dims, corrupting the residual connection below.
        if self.shift_size > 0:
            x = torch.roll(shifted_out, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_out

        x = x.reshape(B, -1, C)
        # residual connection around attention
        x = h + x

        h = x
        x = self.mlp_norm(x)
        x = self.mlp(x)
        # residual connection around the MLP
        x = h + x

        return x


'''
    Each stage corresponds to one level of the feature pyramid.
'''


class Swin_stage(nn.Module):
    def __init__(self,
                 depth,  # number of blocks in this stage
                 dim,  # input channel dimension
                 num_heads,  # attention heads
                 input_res,  # (H, W) of the incoming feature map
                 window_size,  # attention window size
                 qkv_bias=None,  # bias flag for the qkv projection
                 patch_merging=None  # whether to merge patches at the end
                 ):
        super().__init__()
        # Stack `depth` blocks, alternating plain and shifted windows:
        # even-indexed blocks use shift 0, odd-indexed use window_size // 2.
        self.blocks = nn.ModuleList(
            Swin_Block(
                dim=dim,
                num_heads=num_heads,
                input_res=input_res,
                window_size=window_size,
                qkv_bias=qkv_bias,
                shift_size=(window_size // 2 if idx % 2 else 0)
            )
            for idx in range(depth)
        )

        # Downsample between stages; the final stage keeps its resolution.
        if patch_merging is None:
            self.patch_merge = nn.Identity()
        else:
            self.patch_merge = Patch_Merging(input_res, dim)

    def forward(self, x):
        # With patch_size=4 on a 224 input, the first stage sees [B, 56*56, 96].
        for blk in self.blocks:
            x = blk(x)
        return self.patch_merge(x)


class Swin_Model(nn.Module):
    """Swin Transformer classifier.

    Pipeline: patch embedding -> dropout -> 4 stages (window-attention blocks
    plus patch merging) -> LayerNorm -> global average pool -> linear head.
    """

    def __init__(self,
                 img_size=224,  # input image size
                 patch_size=4,  # size of each patch
                 window_size=7,  # attention window size
                 in_dim=3,  # input channels
                 embed_dim=96,
                 num_heads=[3, 6, 12, 24],  # heads per stage
                 depth=[2, 2, 6, 2],  # blocks per stage
                 num_class=100,  # number of classes
                 drop_rate=0,  # dropout probability (default 0)
                 ):
        super().__init__()
        self.embed_dim = embed_dim
        self.patch_size = patch_size
        self.window_size = window_size
        self.num_heads = num_heads
        self.depth = depth
        self.num_class = num_class
        self.num_stages = len(num_heads)
        # Channel dim entering the classifier: embed_dim * 2^(num_stages-1),
        # e.g. 96 * 2^3 = 768.
        self.final_dim = int(self.embed_dim * (2 ** (self.num_stages - 1)))
        # Patches per row/column after embedding.
        self.patch_resolution = img_size // patch_size

        # Split the image into patches.
        self.patch_embed = Patch_Embeding(self.embed_dim, self.patch_size)
        self.stages = nn.ModuleList()
        # Build the stages: channel dim doubles and resolution halves each time.
        # (Loop variables renamed so they don't shadow self.depth/self.num_heads' sources.)
        for idx, (stage_depth, stage_heads) in enumerate(zip(self.depth, self.num_heads)):
            res = self.patch_resolution // (2 ** idx)
            self.stages.append(Swin_stage(
                depth=stage_depth,
                dim=int(self.embed_dim * 2 ** idx),
                num_heads=stage_heads,
                input_res=(res, res),
                window_size=self.window_size,
                qkv_bias=True,
                patch_merging=Patch_Merging if (idx < self.num_stages - 1) else None
            ))
        self.pos_drop = nn.Dropout(p=drop_rate)
        self.norm = nn.LayerNorm(self.final_dim)
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.head = nn.Linear(self.final_dim, self.num_class)
        self.apply(self._init_weights)

    def forward(self, x):
        # [B, 3, 224, 224] -> [B, 3136, 96]  (3136 = (224/4) * (224/4))
        x = self.patch_embed(x)

        # Plain dropout only; stochastic depth (drop_path) is not implemented.
        x = self.pos_drop(x)

        # Resolution path per stage: (56,56) -> (28,28) -> (14,14) -> (7,7)
        for stage in self.stages:
            x = stage(x)

        x = self.norm(x)
        x = self.avgpool(x.transpose(1, 2))  # [B, C, 1]
        x = torch.flatten(x, 1)
        return self.head(x)

    def _init_weights(self, m):
        # Plain normal init stands in for timm's trunc_normal_ (truncated
        # normal), which the original author could not install.
        if isinstance(m, nn.Linear):
            nn.init.normal_(m.weight, mean=0, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)


def main():
    # Smoke test: forward a random batch through the model and print the
    # output shape.
    batch = torch.randn((2, 3, 224, 224))
    net = Swin_Model()
    logits = net(batch)
    print(logits.size())  # [B, num_classes]


if __name__ == '__main__':
    main()
