# _*_ coding : utf-8  _*_
# @Time : 2025/3/19 23:49
# @Author : 马梓航
# @File : model
# @Project : SWT（SWT各种实现+小马实现
import torch
from torch import nn
import torchvision
import torch.nn.functional as F
import math
from einops import rearrange

# Helper: partition a patch sequence into non-overlapping attention windows
def patch2window(x, window_size):
    """Partition a patch sequence into non-overlapping square attention windows.

    Args:
        x: tensor of shape (batch_size, sequence_length, model_dim);
           sequence_length is assumed to be a perfect square H * W with H == W
           (row-major patch order), and window_size must divide H.
        window_size: side length of each square window, in patches.

    Returns:
        Tensor of shape (batch_size * num_windows, window_size**2, model_dim),
        windows ordered row-major over the window grid.
    """
    batch_size, sequence_length, model_dim = x.shape
    H = int(math.sqrt(sequence_length))
    W = H
    num_window = H // window_size

    # Split each spatial axis as (num_window, window_size) and bring the two
    # window-grid axes in front of the two intra-window axes.  The original
    # reshape/transpose chain split the axes as (window_size, num_window)
    # (producing strided, non-contiguous windows) and its final flat reshape
    # moved model_dim to the last position without a permute, which mixed
    # channels into the window axis whenever model_dim > 1.
    x = x.reshape(batch_size, num_window, window_size, num_window, window_size, model_dim)
    x = x.permute(0, 1, 3, 2, 4, 5)
    x = x.reshape(batch_size * num_window * num_window, window_size * window_size, model_dim)

    return x

# Helper: build the additive attention mask used by shifted-window MSA
def build_mask_for_shifted_wmsa(batch_size, image_height, image_width, window_size):
    """Build the additive attention mask for shifted-window self-attention.

    Patches that originate from different pre-shift regions must not attend to
    each other after the cyclic shift, so their attention logits receive -1e9.

    Args:
        batch_size: number of images; the mask is identical per image.
        image_height, image_width: spatial size of the feature map, in patches.
        window_size: attention window side length.

    Returns:
        Float tensor of shape (batch_size * num_windows, window_size**2,
        window_size**2): 0 where two patches share a region, -1e9 otherwise.
    """
    # Region id per row / column.  Boundaries fall window_size//2 before each
    # window edge, matching the cyclic shift applied to the feature map.
    # (Vectorized; the original filled the matrix with a Python double loop on
    # every forward call.)
    row_id = (torch.arange(image_height) + window_size // 2) // window_size
    col_id = (torch.arange(image_width) + window_size // 2) // window_size
    index_matrix = (row_id[:, None] * (image_height // window_size)
                    + row_id[:, None] + col_id[None, :] + 1).to(torch.float32)

    # Roll by the same amount the feature map is shifted; note
    # -window_size // 2 == -ceil(window_size / 2), which aligns the region
    # boundaries above with the window edges after the shift.
    rolled_index_matrix = torch.roll(index_matrix,
                                     shifts=(-window_size // 2, -window_size // 2),
                                     dims=(0, 1))
    rolled_index_matrix = rolled_index_matrix.unsqueeze(0).unsqueeze(0)

    # Cut the rolled region-id map into windows: (1, num_window, window_size**2).
    c = F.unfold(rolled_index_matrix, kernel_size=window_size, stride=window_size).transpose(-1, -2)
    c = c.tile(batch_size, 1, 1)

    bs, num_window, num_patch_in_window = c.shape

    # Pairwise comparison of region ids inside each window.
    same_region = ((c.unsqueeze(-1) - c.unsqueeze(-2)) == 0).to(torch.float32)
    additive_matrix = (1 - same_region) * (-1e9)
    additive_matrix = additive_matrix.reshape(bs * num_window, num_patch_in_window, num_patch_in_window)

    return additive_matrix

# Helper: cyclically shift the feature map and re-partition it into windows
def shift_window(w_msa_output, window_size, shift_size, generate_mask):
    """Cyclically shift a feature map and re-partition it into windows.

    Args:
        w_msa_output: (batch, depth, height, width) feature map.
        window_size: window side length, in patches.
        shift_size: roll amount; negative rolls towards the top-left,
            positive towards the bottom-right.
        generate_mask: when True, also build the SW-MSA additive mask.

    Returns:
        (windows, mask): windows has shape
        (batch, num_windows, window_size**2, depth); mask is the additive
        attention mask, or None when generate_mask is False.
    """
    batch, depth, height, width = w_msa_output.shape
    windows_per_side = height // window_size
    windows_total = windows_per_side * windows_per_side
    patches_per_window = window_size * window_size

    shifted = torch.roll(w_msa_output, shifts=(shift_size, shift_size), dims=(2, 3))

    # (b, d, nh, ws, nw, ws) -> (b, nh, nw, ws, ws, d) -> window layout.
    blocks = shifted.reshape(batch, depth, windows_per_side, window_size, windows_per_side, window_size)
    blocks = blocks.permute(0, 2, 4, 3, 5, 1)
    windows = blocks.reshape(batch, windows_total, patches_per_window, depth)

    mask = build_mask_for_shifted_wmsa(batch, height, width, window_size) if generate_mask else None
    return windows, mask


# PatchMerging: 2x spatial downsampling module (used by Basiclayer)
class PatchMerging(nn.Module):
    """Downsample a patch sequence by 2x in each spatial direction.

    Each 2x2 neighbourhood of patches is concatenated into one token of
    dimension 4 * model_dim, layer-normalised, and linearly reduced to
    2 * model_dim.
    """

    def __init__(self, model_dim):
        super().__init__()
        self.norm = nn.LayerNorm(4 * model_dim)
        self.reduction = nn.Linear(4 * model_dim, 2 * model_dim)

    def forward(self, x, H, W):
        """x: (batch, H*W, model_dim) -> (batch, ceil(H/2)*ceil(W/2), 2*model_dim)."""
        batch_size, length_sequence, model_dim = x.shape

        assert length_sequence == H * W, "there is sth bad happened in patch merging"

        x = x.contiguous().view(batch_size, H, W, model_dim)

        # Pad the bottom/right edge so both spatial sizes become even.
        # F.pad pads the LAST dimension first, so the tuple reads
        # (model_dim: 0,0)(W: 0, W%2)(H: 0, H%2).  The original code swapped
        # the H and W amounts, which crashed whenever H and W had different
        # parity.
        if (H % 2 != 0) or (W % 2 != 0):
            x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))

        # Gather the four members of every 2x2 neighbourhood via slicing.
        x0 = x[:, 0::2, 0::2, :]
        x1 = x[:, 1::2, 0::2, :]
        x2 = x[:, 1::2, 1::2, :]
        x3 = x[:, 0::2, 1::2, :]
        x = torch.cat([x0, x1, x2, x3], dim=-1)  # (batch, H/2, W/2, 4*model_dim)
        x = x.reshape(batch_size, -1, 4 * model_dim)

        x = self.norm(x)
        x = self.reduction(x)

        return x


# ShiftedWindowAttention: SW-MSA module (masked, shifted windows)
class ShiftedWindowAttention(nn.Module):
    """Shifted-window multi-head self-attention (SW-MSA) with relative position bias.

    The input sequence is reshaped into a square image, cyclically shifted
    towards the top-left, partitioned into windows, attended within each
    window (with an additive mask keeping patches from different pre-shift
    regions apart), shifted back, and returned in window layout:
    (batch * num_windows, window_size**2, model_dim).
    """

    def __init__(self, model_dim, num_head, patch_size, attn_drop_rate, dpr, window_size):
        super().__init__()
        self.model_dim = model_dim
        self.num_head = num_head
        self.patch_size = patch_size
        self.head_dim = model_dim // num_head
        self.scale = self.head_dim ** -0.5

        self.window_size = window_size
        self.shift_window = window_size // 2

        # NOTE(review): nn.Linear defaults to bias=True, so qkv DOES carry a
        # bias despite the original comment claiming none was used.
        self.qkv = nn.Linear(model_dim, model_dim * 3)
        self.attn_drop = nn.Dropout(attn_drop_rate)
        self.proj = nn.Linear(model_dim, model_dim)
        self.proj_drop = nn.Dropout(dpr)

        # One learnable bias per relative (row, col) offset, shared by all heads.
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * self.window_size - 1) * (2 * self.window_size - 1)))

        # Precompute, for every ordered pair of patches in a window, the index
        # of its relative offset in the bias table.
        coords_h = torch.arange(self.window_size)
        coords_w = torch.arange(self.window_size)
        coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing="ij"))
        coords_flatten = torch.flatten(coords, 1)
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()
        relative_coords[:, :, 0] += self.window_size - 1  # shift rows into [0, 2*ws-2]
        relative_coords[:, :, 1] += self.window_size - 1
        relative_coords[:, :, 0] *= 2 * self.window_size - 1  # row-major flattening
        relative_position_index = relative_coords.sum(-1)

        # Registered as a buffer: saved with the model, not trained.
        self.register_buffer("relative_position_index", relative_position_index)

        self.softmax = nn.Softmax(-1)

    def forward(self, x):
        batch_size, length_sequence, model_dim = x.shape

        # Sequence -> square image (assumes length_sequence is a perfect square).
        image_height = int(math.sqrt(length_sequence))
        image_width = image_height
        x = x.transpose(-1, -2).reshape(batch_size, model_dim, image_height, image_width)

        # Forward cyclic shift.  -self.window_size // 2 == -ceil(ws/2), which
        # matches the roll used inside build_mask_for_shifted_wmsa, so the
        # mask and the data stay aligned.
        shift = -self.window_size // 2
        x, mask = shift_window(x, self.window_size, shift, True)
        batch_size, num_windows, num_patch_in_window, model_dim = x.shape
        bs_nm = batch_size * num_windows
        x = x.reshape(bs_nm, num_patch_in_window, model_dim)

        # Project and split heads: (3, bs*nw, heads, patches, head_dim).
        # The original reshaped (..., model_dim, 3) and then reshaped
        # (patches, model_dim) directly into (heads, patches, head_dim),
        # which scrambled patches across heads.
        qkv = self.qkv(x).reshape(bs_nm, num_patch_in_window, 3, self.num_head, self.head_dim)
        qkv = qkv.permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)

        attn = q @ k.transpose(-1, -2) * self.scale

        # Relative position bias (broadcast over batch*windows and heads) plus
        # the shift mask (broadcast over heads).
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size * self.window_size, self.window_size * self.window_size)
        attn = attn + relative_position_bias.unsqueeze(0).unsqueeze(0) + mask.unsqueeze(1)

        attn = self.softmax(attn)
        attn = self.attn_drop(attn)

        # Merge heads: (bs*nw, patches, model_dim).
        x = (attn @ v).transpose(1, 2).reshape(bs_nm, num_patch_in_window, model_dim)

        # Undo the window partition to recover the shifted image, then roll
        # back by the exact opposite shift.  The original reshaped window
        # layout straight into (H, W) (scrambling positions) and rolled back
        # by +window_size // 2, which does not cancel -window_size // 2 for
        # odd window sizes (net drift of one patch per block).
        n_side = image_height // self.window_size
        x = x.reshape(batch_size, n_side, n_side, self.window_size, self.window_size, model_dim)
        x = x.permute(0, 5, 1, 3, 2, 4).reshape(batch_size, model_dim, image_height, image_width)
        x, _ = shift_window(x, self.window_size, -shift, generate_mask=False)

        x = x.reshape(bs_nm, num_patch_in_window, model_dim)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


# WindowAttention: W-MSA module (unmasked, unshifted windows)
class WindowAttention(nn.Module):
    """Window multi-head self-attention (W-MSA) with relative position bias.

    Operates on window-layout input (batch * num_windows, window_size**2,
    model_dim) and returns a tensor of the same shape.
    """

    def __init__(self, model_dim, num_head, patch_size, attn_drop_rate, dpr, window_size):
        super().__init__()
        self.model_dim = model_dim
        self.num_head = num_head
        self.patch_size = patch_size
        self.head_dim = model_dim // num_head
        self.scale = self.head_dim ** -0.5

        self.window_size = window_size

        # NOTE(review): nn.Linear defaults to bias=True, so qkv DOES carry a
        # bias despite the original comment claiming none was used.
        self.qkv = nn.Linear(model_dim, model_dim * 3)
        self.attn_drop = nn.Dropout(attn_drop_rate)
        self.proj = nn.Linear(model_dim, model_dim)
        self.proj_drop = nn.Dropout(dpr)

        # One learnable bias per relative (row, col) offset, shared by all heads.
        self.relative_position_bias_table = nn.Parameter(torch.zeros((2 * self.window_size - 1) * (2 * self.window_size - 1)))

        # Precompute, for every ordered pair of patches in a window, the index
        # of its relative offset in the bias table.
        coords_h = torch.arange(self.window_size)
        coords_w = torch.arange(self.window_size)
        coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing="ij"))
        coords_flatten = torch.flatten(coords, 1)
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()
        relative_coords[:, :, 0] += self.window_size - 1  # shift rows into [0, 2*ws-2]
        relative_coords[:, :, 1] += self.window_size - 1
        relative_coords[:, :, 0] *= 2 * self.window_size - 1  # row-major flattening
        relative_position_index = relative_coords.sum(-1)

        # Registered as a buffer: saved with the model, not trained.
        self.register_buffer("relative_position_index", relative_position_index)

        self.softmax = nn.Softmax(-1)

    def forward(self, x):
        """x: (batch*num_windows, patches_per_window, model_dim) -> same shape."""
        bs_nm, num_patch_in_window, model_dim = x.shape

        # Project and split heads: (3, bs*nw, heads, patches, head_dim).
        # The original reshaped (..., model_dim, 3) and then reshaped
        # (patches, model_dim) directly into (heads, patches, head_dim),
        # which scrambled patches across heads.
        qkv = self.qkv(x).reshape(bs_nm, num_patch_in_window, 3, self.num_head, self.head_dim)
        qkv = qkv.permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)

        attn = q @ k.transpose(-1, -2) * self.scale

        # Relative position bias, broadcast over (batch*windows, heads)
        # (the original tiled it explicitly, which broadcasting makes redundant).
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size * self.window_size, self.window_size * self.window_size)
        attn = attn + relative_position_bias.unsqueeze(0).unsqueeze(0)

        attn = self.softmax(attn)
        attn = self.attn_drop(attn)

        # Merge heads: (bs*nw, patches, model_dim).
        x = (attn @ v).transpose(1, 2).reshape(bs_nm, num_patch_in_window, model_dim)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


# SwinTransformerBlock: one W-MSA sub-block followed by one SW-MSA sub-block
class SwinTransformerBlock(nn.Module):
    """One Swin block: a W-MSA sub-block followed by an SW-MSA sub-block.

    Each sub-block is pre-norm attention with a residual connection, followed
    by a pre-norm 2-layer MLP (GELU) with a residual connection.  The
    attention modules return window-layout tensors
    (batch*num_windows, window_size**2, model_dim), which are reshaped back to
    (batch, seq_len, model_dim) before the residual additions.
    """

    def __init__(self, model_dim, num_head, window_size, mlp_ratio, emb_drop_rate, attn_drop_rate, dpr, patch_size):
        super().__init__()
        self.model_dim = model_dim
        self.num_head = num_head
        self.window_size = window_size

        # Pre-norms for the two attention sub-blocks and the two MLPs.
        self.norm1 = nn.LayerNorm(model_dim)
        self.norm2 = nn.LayerNorm(model_dim)
        self.norm3 = nn.LayerNorm(model_dim)
        self.norm4 = nn.LayerNorm(model_dim)

        self.mlp1 = nn.Linear(model_dim, model_dim * mlp_ratio)
        self.mlp2 = nn.Linear(model_dim * mlp_ratio, model_dim)
        self.mlp3 = nn.Linear(model_dim, model_dim * mlp_ratio)
        self.mlp4 = nn.Linear(model_dim * mlp_ratio, model_dim)

        self.drop1 = nn.Dropout(emb_drop_rate)
        self.drop2 = nn.Dropout(emb_drop_rate)
        self.drop3 = nn.Dropout(emb_drop_rate)
        self.drop4 = nn.Dropout(emb_drop_rate)

        self.wmhsa1 = WindowAttention(model_dim, num_head, patch_size, attn_drop_rate, dpr, window_size)  # no mask
        self.wmhsa2 = ShiftedWindowAttention(model_dim, num_head, patch_size, attn_drop_rate, dpr, window_size)  # masked

    def forward(self, input):
        batch_size, length_sequence, model_dim = input.shape

        # --- W-MSA sub-block ---
        x = self.norm1(input)
        x = patch2window(x, self.window_size)
        x = self.wmhsa1(x)
        x = x.reshape(batch_size, length_sequence, model_dim)
        input2 = input + x
        # MLP with GELU.  The original chained two Linear layers with no
        # activation in between, which collapses to a single affine map.
        x = self.drop2(self.mlp2(self.drop1(F.gelu(self.mlp1(self.norm2(input2))))))
        input3 = x + input2

        # --- SW-MSA sub-block ---
        x = self.norm3(input3)
        x = self.wmhsa2(x)
        x = x.reshape(batch_size, length_sequence, model_dim)
        input4 = input3 + x
        x = self.drop4(self.mlp4(self.drop3(F.gelu(self.mlp3(self.norm4(input4))))))
        # Out-of-place add; the original `x += input4` mutated an autograd intermediate.
        return x + input4

# Basiclayer: one stage of the network (depth blocks + optional downsample)
class Basiclayer(nn.Module):
    """One Swin stage: `depth` SwinTransformerBlocks plus optional PatchMerging.

    Args:
        model_dim: token dimension for this stage.
        depth: number of transformer blocks in the stage.
        num_head, window_size, mlp_ratio, emb_drop_rate, attn_drop_rate,
        patch_size: forwarded to each SwinTransformerBlock.
        dprlist: per-block drop rates (list) or one scalar shared by all blocks.
        PM: the downsampling class (PatchMerging) or None for the last stage.
    """

    def __init__(self, model_dim, depth, num_head, window_size, mlp_ratio, emb_drop_rate, attn_drop_rate, dprlist, PM, patch_size):
        super().__init__()
        self.model_dim = model_dim
        self.depth = depth
        self.window_size = window_size
        self.shift_size = window_size // 2

        self.blocks = nn.ModuleList([
            SwinTransformerBlock(
                model_dim,
                num_head,
                window_size,
                mlp_ratio,
                emb_drop_rate,
                attn_drop_rate,
                dprlist[i] if isinstance(dprlist, list) else dprlist,
                patch_size,
            )
            for i in range(depth)
        ])

        # Downsample between stages; None after the last stage.
        if PM is not None:
            self.downsample = PM(model_dim)
        else:
            self.downsample = None

    def forward(self, x, H, W):
        """x: (batch, H*W, model_dim); returns (x, H, W) after the stage.

        H and W are halved (rounding up, since PatchMerging pads odd sizes)
        when a downsample is present.
        """
        for block in self.blocks:
            x = block(x)
        # (removed a debug print of the intermediate shape here)
        if self.downsample is not None:
            x = self.downsample(x, H, W)
            H, W = (H + 1) // 2, (W + 1) // 2  # ceil-division because of padding
        return x, H, W




# SwinTransformer: top-level classification model
class SwinTransformer(nn.Module):
    """Swin Transformer image classifier.

    Pipeline: conv patch embedding -> LayerNorm + dropout -> a sequence of
    Basiclayer stages (channel dim doubles, spatial dims halve between
    stages) -> final LayerNorm -> global average pool over patches -> linear
    classification head.
    """

    def __init__(self, patch_size, in_channel, model_dim, emb_drop_rate, max_sequence_length, num_heads, window_size, mlp_ratio, num_classes, depths, atten_drop_rate):
        super().__init__()
        # Patch embedding: one conv with kernel == stride == patch_size.
        self.convForImage = nn.Conv2d(in_channel, model_dim, patch_size, patch_size)
        self.patch_size = patch_size
        self.embedding_drop = nn.Dropout(emb_drop_rate)
        self.embedding_norm = nn.LayerNorm(model_dim)
        self.max_sequence_length = max_sequence_length
        self.num_layers = len(depths)
        self.mlp_ratio = mlp_ratio

        # Linearly spaced per-block drop rates across all blocks of all stages.
        dpr = [x for x in torch.linspace(0, atten_drop_rate, sum(depths))]

        self.layers = nn.ModuleList([])
        for item in range(self.num_layers):
            layers = Basiclayer(
                int(model_dim * (2 ** item)),  # channel dim doubles each stage
                depths[item],
                num_heads[item],
                window_size,
                self.mlp_ratio,
                emb_drop_rate,  # the MLP dropout rate shares the embedding dropout rate
                atten_drop_rate,
                dpr[sum(depths[:item]):sum(depths[:item + 1])],
                PatchMerging if (item < self.num_layers - 1) else None,  # no merge after last stage
                patch_size,
            )
            self.layers.append(layers)

        self.final_norm = nn.LayerNorm(model_dim * 2 ** (self.num_layers - 1))
        # AdaptiveAvgPool1d(1) pools the patch dimension to length 1
        # (equivalent to mean over the sequence).
        self.final_pool = nn.AdaptiveAvgPool1d(1)
        self.final_head = nn.Linear(model_dim * 2 ** (self.num_layers - 1), num_classes)

    def forward(self, x):
        """x: (batch, channels, height, width) -> (batch, num_classes) logits."""

        # step1 : image to embedding with layerNorm and dropout
        batch_size, in_channel, image_height, image_width = x.shape

        # Pad H/W on the bottom/right up to a multiple of patch_size.
        # (ps - dim % ps) % ps is 0 when the dim is already divisible; the
        # original used ps - dim % ps, which padded an already-divisible
        # dimension by a full extra patch whenever the other one needed padding.
        pad_w = (self.patch_size - image_width % self.patch_size) % self.patch_size
        pad_h = (self.patch_size - image_height % self.patch_size) % self.patch_size
        if pad_w or pad_h:
            x = F.pad(x, (0, pad_w, 0, pad_h, 0, 0))

        # Conv embedding -> (batch, seq_len, model_dim), then norm + dropout.
        x = self.convForImage(x).flatten(2).transpose(-1, -2)
        x = self.embedding_norm(x)
        x = self.embedding_drop(x)

        # step2 : run the stages, tracking the (assumed square) spatial size.
        _, seq_len, _ = x.shape
        H = int(math.sqrt(seq_len))
        W = H
        for layer in self.layers:
            x, H, W = layer(x, H, W)

        # step3 : norm, global average pool over patches, classification head.
        x = self.final_norm(x)
        x = self.final_pool(x.transpose(-1, -2))
        x = x.flatten(1)
        x = self.final_head(x)
        # (removed a debug print of the output shape here)
        return x


## smoke test
def thisisswt():
    """Build the SwinTransformer configuration used by the smoke test below."""
    return SwinTransformer(
        4,               # patch_size
        3,               # in_channel
        96,              # model_dim
        0.01,            # emb_drop_rate
        200,             # max_sequence_length
        (3, 6, 12, 24),  # num_heads per stage
        7,               # window_size
        4,               # mlp_ratio
        1000,            # num_classes
        (2, 2, 6, 2),    # depths per stage
        0.02,            # atten_drop_rate
    )

# Smoke test: guarded so that importing this module no longer builds the
# model and runs a forward pass as a side effect (the original executed
# these statements at import time).
if __name__ == "__main__":
    stage = 1
    swt = thisisswt()
    # The input side is ideally a multiple of 7*32 or 7*64 for this
    # configuration (224 = 7 * 32).
    image = torch.randn(1, 3, 224, 224)
    swt(image)