from functools import reduce, lru_cache

from operator import mul



import numpy as np

import torch

import torch.nn as nn

import torch.nn.functional as F

import torch.utils.checkpoint as checkpoint

from einops import rearrange

from mmcv.runner import load_checkpoint

from timm.models.layers import DropPath, trunc_normal_



class Mlp(nn.Module):

    """ Multilayer perceptron."""



    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):

        super().__init__()

        out_features = out_features or in_features

        hidden_features = hidden_features or in_features

        self.fc1 = nn.Linear(in_features, hidden_features)

        self.act = act_layer()

        self.fc2 = nn.Linear(hidden_features, out_features)

        self.drop = nn.Dropout(drop)



    def forward(self, x):

        x = self.fc1(x)

        x = self.act(x)

        x = self.drop(x)

        x = self.fc2(x)

        x = self.drop(x)

        return x





def img2windows(img, H_sp, W_sp):

    """

    img: B C D H W

    """

    B, C, D, H, W = img.shape

    img_reshape = img.view(B, C, D, H // H_sp, H_sp, W // W_sp, W_sp)

    img_perm = img_reshape.permute(0, 2, 3, 5, 4, 6, 1).contiguous().reshape(-1, D * H_sp * W_sp, C)

    return img_perm





def windows2img(img_splits_hw, H_sp, W_sp, D, H, W):

    """

    img_splits_hw: (B', D*H_sp*W_sp, C)

    """



    B = int(img_splits_hw.shape[0] / (D * H * W / H_sp / W_sp))



    img = img_splits_hw.view(B, D, H // H_sp, W // W_sp, H_sp, W_sp, -1)

    img = img.permute(0, 1, 2, 4, 3, 5, 6).contiguous().view(B, D, H, W, -1)



    return img





class Merge_Block(nn.Module):

    def __init__(self, dim, dim_out, norm_layer=nn.LayerNorm):

        super().__init__()

        self.conv = nn.Conv3d(dim, dim_out, 3, 2, 1)

        self.norm = norm_layer(dim_out)



    def forward(self, x):

        B, new_HW, C = x.shape

        D = 32  # the token depth is assumed fixed at 32 for this architecture

        H = W = int(np.sqrt(new_HW // D))

        x = x.transpose(-2, -1).contiguous().view(B, C, D, H, W)

        x = self.conv(x)

        B, C = x.shape[:2]

        x = x.view(B, C, -1).transpose(-2, -1).contiguous()

        x = self.norm(x)



        return x





def window_partition(x, window_size):

    """

    Args:

        x: (B, D, H, W, C)

        window_size (tuple[int]): window size

    Returns:

        windows: (B*num_windows, window_size[0]*window_size[1]*window_size[2], C)

    """

    B, D, H, W, C = x.shape

    x = x.view(B, D // window_size[0], window_size[0], H // window_size[1], window_size[1], W // window_size[2], window_size[2], C)

    windows = x.permute(0, 1, 3, 5, 2, 4, 6, 7).contiguous().view(-1, reduce(mul, window_size), C)

    return windows





def window_reverse(windows, window_size, B, D, H, W):

    """

    Args:

        windows: (B*num_windows, window_size[0], window_size[1], window_size[2], C)

        window_size (tuple[int]): Window size

        B (int): Batch size

        D (int): Depth of the feature map

        H (int): Height of the feature map

        W (int): Width of the feature map

    Returns:

        x: (B, D, H, W, C)

    """

    x = windows.view(B, D // window_size[0], H // window_size[1], W // window_size[2], window_size[0], window_size[1], window_size[2], -1)

    x = x.permute(0, 1, 4, 2, 5, 3, 6, 7).contiguous().view(B, D, H, W, -1)

    return x
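

# Illustrative sketch (not part of the original model): window_partition and
# window_reverse are exact inverses when D, H and W are multiples of the window
# size. The toy sizes below are assumptions; the helper is only for reference
# and is never called by the model.
def _window_roundtrip_example():
    B, D, H, W, C = 2, 8, 14, 14, 96
    window_size = (4, 7, 7)
    x = torch.randn(B, D, H, W, C)
    windows = window_partition(x, window_size)        # (B*nW, 4*7*7, C) = (16, 196, 96)
    y = window_reverse(windows, window_size, B, D, H, W)
    assert torch.equal(x, y)
    return windows.shape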





def get_window_size(x_size, window_size, shift_size=None):

    use_window_size = list(window_size)

    if shift_size is not None:

        use_shift_size = list(shift_size)

    for i in range(len(x_size)):

        if x_size[i] <= window_size[i]:

            use_window_size[i] = x_size[i]

            if shift_size is not None:

                use_shift_size[i] = 0



    if shift_size is None:

        return tuple(use_window_size)

    else:

        return tuple(use_window_size), tuple(use_shift_size)





class WindowAttention3D(nn.Module):

    """ Window based multi-head self attention (W-MSA) module with relative position bias.

    It supports both shifted and non-shifted windows.

    Args:

        dim (int): Number of input channels.

        window_size (tuple[int]): The temporal length, height and width of the window.

        num_heads (int): Number of attention heads.

        qkv_bias (bool, optional):  If True, add a learnable bias to query, key, value. Default: False

        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set

        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0

        proj_drop (float, optional): Dropout ratio of output. Default: 0.0

    """



    def __init__(self, dim, window_size, num_heads, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):



        super().__init__()

        self.dim = dim

        self.window_size = window_size  # Wd, Wh, Ww

        self.num_heads = num_heads

        head_dim = dim // num_heads

        self.scale = qk_scale or head_dim ** -0.5



        # define a parameter table of relative position bias

        self.relative_position_bias_table = nn.Parameter(

            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1) * (2 * window_size[2] - 1), num_heads))  # 2*Wd-1 * 2*Wh-1 * 2*Ww-1, nH



        # get pair-wise relative position index for each token inside the window

        coords_d = torch.arange(self.window_size[0])

        coords_h = torch.arange(self.window_size[1])

        coords_w = torch.arange(self.window_size[2])

        coords = torch.stack(torch.meshgrid(coords_d, coords_h, coords_w))  # 3, Wd, Wh, Ww

        coords_flatten = torch.flatten(coords, 1)  # 3, Wd*Wh*Ww

        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 3, Wd*Wh*Ww, Wd*Wh*Ww

        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wd*Wh*Ww, Wd*Wh*Ww, 3

        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0

        relative_coords[:, :, 1] += self.window_size[1] - 1

        relative_coords[:, :, 2] += self.window_size[2] - 1



        relative_coords[:, :, 0] *= (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1)

        relative_coords[:, :, 1] *= (2 * self.window_size[2] - 1)

        relative_position_index = relative_coords.sum(-1)  # Wd*Wh*Ww, Wd*Wh*Ww

        self.register_buffer("relative_position_index", relative_position_index)



        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)

        self.attn_drop = nn.Dropout(attn_drop)

        self.proj = nn.Linear(dim, dim)

        self.proj_drop = nn.Dropout(proj_drop)



        trunc_normal_(self.relative_position_bias_table, std=.02)

        self.softmax = nn.Softmax(dim=-1)



    def forward(self, x, mask=None, prev_v=None, prev_k=None, prev_q=None, is_decoder=False):

        """ Forward function.

        Args:

            x: input features with shape of (num_windows*B, N, C)

            mask: (0/-inf) mask with shape of (num_windows, N, N) or None

        """

        B_, N, C = x.shape

        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)

        q, k, v = qkv[0], qkv[1], qkv[2]  # B_, nH, N, C



        q = q * self.scale

        attn = q @ k.transpose(-2, -1)



        relative_position_bias = self.relative_position_bias_table[

            self.relative_position_index[:N, :N].reshape(-1)].reshape(

            N, N, -1)  # Wd*Wh*Ww,Wd*Wh*Ww,nH

        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wd*Wh*Ww, Wd*Wh*Ww

        attn = attn + relative_position_bias.unsqueeze(0)  # B_, nH, N, N



        if mask is not None:

            nW = mask.shape[0]

            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)

            attn = attn.view(-1, self.num_heads, N, N)

            attn = self.softmax(attn)

        else:

            attn = self.softmax(attn)



        attn = self.attn_drop(attn)



        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)

        x = self.proj(x)

        x = self.proj_drop(x)

        x2 = None



        if is_decoder:

            # q was already scaled above; reuse it directly against the cached keys
            attn2 = q @ prev_k.transpose(-2, -1)

            attn2 = attn2 + relative_position_bias.unsqueeze(0)



            if mask is not None:

                nW = mask.shape[0]

                attn2 = attn2.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)

                attn2 = attn2.view(-1, self.num_heads, N, N)

                attn2 = self.softmax(attn2)

            else:

                attn2 = self.softmax(attn2)



            attn2 = self.attn_drop(attn2)



            x2 = (attn2 @ prev_v).transpose(1, 2).reshape(B_, N, C)

            x2 = self.proj(x2)

            x2 = self.proj_drop(x2)



        return x, x2, v, k, q
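

# Illustrative sketch (not part of the original model): WindowAttention3D takes
# window tokens of shape (num_windows*B, N, C), where N equals the window volume.
# The decoder branch additionally attends to cached value/key tensors from a
# previous block. The sizes below are assumptions for a toy window; the helper
# is never called by the model.
def _window_attention_example():
    attn = WindowAttention3D(dim=96, window_size=(2, 7, 7), num_heads=4)
    tokens = torch.randn(8, 2 * 7 * 7, 96)              # 8 windows of 98 tokens each
    x, x2, v, k, q = attn(tokens, mask=None)             # plain W-MSA; x2 is None here
    y, y2, _, _, _ = attn(tokens, mask=None, prev_v=v, prev_k=k, prev_q=q,
                          is_decoder=True)               # cross branch reuses cached k/v
    return x.shape, y2.shape                             # both (8, 98, 96)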





class PositionalEncoding3D(nn.Module):

    def __init__(self, channels):

        """

        :param channels: The last dimension of the tensor you want to apply pos emb to.

        """

        super(PositionalEncoding3D, self).__init__()

        channels = int(np.ceil(channels / 6) * 2)

        if channels % 2:

            channels += 1

        self.channels = channels

        inv_freq = 1. / (10000 ** (torch.arange(0, channels, 2).float() / channels))

        self.register_buffer('inv_freq', inv_freq)



    def forward(self, tensor):

        """

        :param tensor: A 5d tensor of size (batch_size, x, y, z, ch)

        :return: Positional Encoding Matrix of size (batch_size, x, y, z, ch)

        """

        if len(tensor.shape) != 5:

            raise RuntimeError("The input tensor has to be 5d!")

        batch_size, x, y, z, orig_ch = tensor.shape

        pos_x = torch.arange(x, device=tensor.device).type(self.inv_freq.type())

        pos_y = torch.arange(y, device=tensor.device).type(self.inv_freq.type())

        pos_z = torch.arange(z, device=tensor.device).type(self.inv_freq.type())

        sin_inp_x = torch.einsum("i,j->ij", pos_x, self.inv_freq)

        sin_inp_y = torch.einsum("i,j->ij", pos_y, self.inv_freq)

        sin_inp_z = torch.einsum("i,j->ij", pos_z, self.inv_freq)

        emb_x = torch.cat((sin_inp_x.sin(), sin_inp_x.cos()), dim=-1).unsqueeze(1).unsqueeze(1)

        emb_y = torch.cat((sin_inp_y.sin(), sin_inp_y.cos()), dim=-1).unsqueeze(1)

        emb_z = torch.cat((sin_inp_z.sin(), sin_inp_z.cos()), dim=-1)

        emb = torch.zeros((x, y, z, self.channels * 3), device=tensor.device).type(tensor.type())

        emb[:, :, :, :self.channels] = emb_x

        emb[:, :, :, self.channels:2 * self.channels] = emb_y

        emb[:, :, :, 2 * self.channels:] = emb_z



        return emb[None, :, :, :, :orig_ch].repeat(batch_size, 1, 1, 1, 1)
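

# Illustrative sketch (not part of the original model): the encoding is built per
# call from the input's spatial size, so any (batch, x, y, z, ch) tensor works.
# The sizes below are assumptions for a quick shape check; the helper is never
# called by the model.
def _positional_encoding_example():
    pe = PositionalEncoding3D(channels=96)
    feat = torch.randn(2, 4, 8, 8, 96)
    emb = pe(feat)                    # same shape as the input: (2, 4, 8, 8, 96)
    return emb.shape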





class SwinTransformerBlock3D(nn.Module):

    """ Swin Transformer Block.

    Args:

        dim (int): Number of input channels.

        num_heads (int): Number of attention heads.

        window_size (tuple[int]): Window size.

        shift_size (tuple[int]): Shift size for SW-MSA.

        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.

        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True

        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.

        drop (float, optional): Dropout rate. Default: 0.0

        attn_drop (float, optional): Attention dropout rate. Default: 0.0

        drop_path (float, optional): Stochastic depth rate. Default: 0.0

        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU

        norm_layer (nn.Module, optional): Normalization layer.  Default: nn.LayerNorm

    """



    def __init__(self, dim, num_heads, window_size=(7, 7, 7), shift_size=(0, 0, 0),

                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,

                 act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_checkpoint=False):

        super().__init__()

        self.dim = dim

        self.num_heads = num_heads

        self.window_size = window_size

        self.shift_size = shift_size

        self.mlp_ratio = mlp_ratio

        self.use_checkpoint = use_checkpoint



        assert 0 <= self.shift_size[0] < self.window_size[0], "shift_size must be in [0, window_size)"

        assert 0 <= self.shift_size[1] < self.window_size[1], "shift_size must be in [0, window_size)"

        assert 0 <= self.shift_size[2] < self.window_size[2], "shift_size must be in [0, window_size)"



        self.norm1 = norm_layer(dim)

        self.attn = WindowAttention3D(

            dim, window_size=self.window_size, num_heads=num_heads,

            qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)



        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        self.norm2 = norm_layer(dim)

        mlp_hidden_dim = int(dim * mlp_ratio)

        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)



    def forward_part1(self, x, mask_matrix, prev_v, prev_k, prev_q, is_decoder):

        B, D, H, W, C = x.shape

        window_size, shift_size = get_window_size((D, H, W), self.window_size, self.shift_size)



        x = self.norm1(x)

        # pad feature maps to multiples of window size

        pad_l = pad_t = pad_d0 = 0

        pad_d1 = (window_size[0] - D % window_size[0]) % window_size[0]

        pad_b = (window_size[1] - H % window_size[1]) % window_size[1]

        pad_r = (window_size[2] - W % window_size[2]) % window_size[2]

        x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b, pad_d0, pad_d1))

        _, Dp, Hp, Wp, _ = x.shape

        # cyclic shift

        if any(i > 0 for i in shift_size):

            shifted_x = torch.roll(x, shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), dims=(1, 2, 3))

            attn_mask = mask_matrix

        else:

            shifted_x = x

            attn_mask = None

        # partition windows

        x_windows = window_partition(shifted_x, window_size)  # B*nW, Wd*Wh*Ww, C

        # W-MSA/SW-MSA

        attn_windows, cross_attn_windows, v, k, q = self.attn(x_windows, mask=attn_mask, prev_v=prev_v, prev_k=prev_k,

                                                              prev_q=prev_q, is_decoder=is_decoder)  # B*nW, Wd*Wh*Ww, C



        # merge windows

        attn_windows = attn_windows.view(-1, *(window_size + (C,)))

        shifted_x = window_reverse(attn_windows, window_size, B, Dp, Hp, Wp)  # B D' H' W' C

        # reverse cyclic shift

        if any(i > 0 for i in shift_size):

            x = torch.roll(shifted_x, shifts=(shift_size[0], shift_size[1], shift_size[2]), dims=(1, 2, 3))

        else:

            x = shifted_x



        x2 = None

        if pad_d1 > 0 or pad_r > 0 or pad_b > 0:

            x = x[:, :D, :H, :W, :].contiguous()



        if cross_attn_windows is not None:

            # merge windows

            cross_attn_windows = cross_attn_windows.view(-1, *(window_size + (C,)))

            cross_shifted_x = window_reverse(cross_attn_windows, window_size, B, Dp, Hp, Wp)  # B D' H' W' C

            # reverse cyclic shift

            if any(i > 0 for i in shift_size):

                x2 = torch.roll(cross_shifted_x, shifts=(shift_size[0], shift_size[1], shift_size[2]), dims=(1, 2, 3))

            else:

                x2 = cross_shifted_x



            if pad_d1 > 0 or pad_r > 0 or pad_b > 0:

                x2 = x2[:, :D, :H, :W, :].contiguous()



        return x, x2, v, k, q



    def forward_part2(self, x):

        return self.drop_path(self.mlp(self.norm2(x)))



    def forward_part3(self, x):

        return self.mlp(self.norm2(x))



    def forward(self, x, mask_matrix, prev_v, prev_k, prev_q, is_decoder=False):

        """ Forward function.

        Args:

            x: Input feature, tensor size (B, D, H, W, C).

            mask_matrix: Attention mask for cyclic shift.

        """



        alpha = 0.5  # fusion weight between the self-attention and cross-attention branches

        shortcut = x

        x2, v, k, q = None, None, None, None



        if self.use_checkpoint:

            x, x2, v, k, q = checkpoint.checkpoint(self.forward_part1, x, mask_matrix, prev_v, prev_k, prev_q, is_decoder)

        else:

            x, x2, v, k, q = self.forward_part1(x, mask_matrix, prev_v, prev_k, prev_q, is_decoder)



        x = shortcut + self.drop_path(x)



        if self.use_checkpoint:

            x = x + checkpoint.checkpoint(self.forward_part2, x)

        else:

            x = x + self.forward_part2(x)



        if x2 is not None:

            x2 = shortcut + self.drop_path(x2)

            if self.use_checkpoint:

                x2 = x2 + checkpoint.checkpoint(self.forward_part2, x2)

            else:

                x2 = x2 + self.forward_part2(x2)



            # parameter-free positional encoding, built on the fly for the current channel dimension
            FPE = PositionalEncoding3D(x.shape[4])



            x = torch.add((1 - alpha) * x, alpha * x2) + self.forward_part3(FPE(x))



        return x, v, k, q
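

# Illustrative sketch (not part of the original model): a single non-shifted block
# applied to a toy (B, D, H, W, C) feature map. With shift_size=(0, 0, 0) no
# attention mask is needed, and the encoder path passes None for the cached v/k/q.
# The sizes are assumptions; the helper is never called by the model.
def _swin_block_example():
    blk = SwinTransformerBlock3D(dim=96, num_heads=3, window_size=(2, 7, 7),
                                 shift_size=(0, 0, 0))
    x = torch.randn(1, 4, 14, 14, 96)
    out, v, k, q = blk(x, None, None, None, None, is_decoder=False)
    return out.shape                                     # (1, 4, 14, 14, 96)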





class PatchMerging(nn.Module):

    """ Patch Merging Layer

    Args:

        dim (int): Number of input channels.

        norm_layer (nn.Module, optional): Normalization layer.  Default: nn.LayerNorm

    """



    def __init__(self, dim, norm_layer=nn.LayerNorm):

        super().__init__()

        self.dim = dim

        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)

        self.norm = norm_layer(4 * dim)



    def forward(self, x):

        """ Forward function.

        Args:

            x: Input feature, tensor size (B, D, H, W, C).

        """

        B, D, H, W, C = x.shape



        # padding

        pad_input = (H % 2 == 1) or (W % 2 == 1)

        if pad_input:

            x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))



        x0 = x[:, :, 0::2, 0::2, :]  # B D H/2 W/2 C

        x1 = x[:, :, 1::2, 0::2, :]  # B D H/2 W/2 C

        x2 = x[:, :, 0::2, 1::2, :]  # B D H/2 W/2 C

        x3 = x[:, :, 1::2, 1::2, :]  # B D H/2 W/2 C

        x = torch.cat([x0, x1, x2, x3], -1)  # B D H/2 W/2 4*C



        x = self.norm(x)

        x = self.reduction(x)



        return x
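

# Illustrative sketch (not part of the original model): PatchMerging halves H and W
# (the depth D is kept) and doubles the channel dimension. Toy sizes are assumptions;
# the helper is never called by the model.
def _patch_merging_example():
    merge = PatchMerging(dim=96)
    x = torch.randn(1, 4, 14, 14, 96)
    return merge(x).shape             # (1, 4, 7, 7, 192)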





class PatchExpand_Up(nn.Module):

    def __init__(self, input_resolution, dim, dim_scale=2, norm_layer=nn.LayerNorm, D_ratio=1):

        super().__init__()

        self.input_resolution = input_resolution

        self.dim_scale = dim_scale

        self.dim = dim

        self.expand = nn.Linear(dim, 2 * dim, bias=False) if dim_scale == 2 else nn.Identity()

        self.norm = norm_layer(dim // dim_scale)
        
        self.D_ratio = D_ratio


    def forward(self, x):

        """

        x: (B, C, D, H, W)

        """

        D, H, W = self.input_resolution

        x = x.flatten(2).transpose(1, 2)

        x = self.expand(x)

        B, L, C = x.shape

        # assert L == D * H * W, "input feature has wrong size"


        x = x.view(B, 32 // self.D_ratio, H, W, C)  # depth is fixed at 32 // D_ratio at this stage

        x = rearrange(x, 'b d h w (p1 p2 c)-> b d (h p1) (w p2) c', p1=self.dim_scale, p2=self.dim_scale, c=C // 4)



        x = self.norm(x)

        x = x.permute(0, 4, 1, 2, 3)



        return x





class PatchExpand(nn.Module):

    def __init__(self, input_resolution, dim, dim_scale=2, norm_layer=nn.LayerNorm):

        super().__init__()

        self.input_resolution = input_resolution

        self.dim_scale = dim_scale

        self.dim = dim

        self.expand = nn.Linear(dim, 2 * dim, bias=False) if dim_scale == 2 else nn.Identity()

        self.norm = norm_layer(dim // dim_scale)



    def forward(self, x):

        """

        x: (B, C, D, H, W)

        """

        D, H, W = self.input_resolution

        x = x.flatten(2).transpose(1, 2)

        x = self.expand(x)

        B, L, C = x.shape

        # assert L == D * H * W, "input feature has wrong size"



        x = x.view(B, D * 8, H, W, C)  # depth is never downsampled, so the real depth is 8x the stored resolution D

        x = rearrange(x, 'b d h w (p1 p2 c)-> b d (h p1) (w p2) c', p1=self.dim_scale, p2=self.dim_scale, c=C // 4)



        x = self.norm(x)

        x = x.permute(0, 4, 1, 2, 3)



        return x





class FinalPatchExpand_X4(nn.Module):

    def __init__(self, input_resolution, dim, dim_scale=4, norm_layer=nn.LayerNorm):

        super().__init__()
        
        if isinstance(dim_scale, int):
            dim_scale = [dim_scale, dim_scale, dim_scale]

        self.input_resolution = input_resolution

        self.dim = dim

        self.dim_scale = dim_scale

        self.expand = nn.Linear(dim, dim_scale[0] * dim_scale[1] * dim_scale[2] * dim, bias=False)

        self.output_dim = dim

        self.norm = norm_layer(self.output_dim)



    def forward(self, x):

        """

        x: (B, D, H, W, C)

        """

        D, H, W = self.input_resolution

        x = x.permute(0, 4, 1, 2, 3)

        x = x.flatten(2).transpose(1, 2)

        x = self.expand(x)

        B, L, C = x.shape



        x = x.view(B, D, H, W, C)

        x = rearrange(x, 'b d h w (p1 p2 p3 c)-> b (d p1) (h p2) (w p3) c', p1=self.dim_scale[0], p2=self.dim_scale[1],

                      p3=self.dim_scale[2],

                      c=C // (self.dim_scale[0] * self.dim_scale[1] * self.dim_scale[2]))

        # x = x.view(B, -1, self.output_dim)

        x = self.norm(x)



        return x
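

# Illustrative sketch (not part of the original model): FinalPatchExpand_X4
# upsamples every spatial axis by dim_scale while keeping the channel count.
# Small assumed sizes keep the Linear expansion tiny; the helper is never called
# by the model.
def _final_patch_expand_example():
    up = FinalPatchExpand_X4(input_resolution=(4, 4, 4), dim=8, dim_scale=2)
    x = torch.randn(1, 4, 4, 4, 8)
    return up(x).shape                # (1, 8, 8, 8, 8)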





class BasicLayer_up(nn.Module):

    """ A basic Swin Transformer layer for one stage.

    Args:

        dim (int): Number of input channels.

        input_resolution (tuple[int]): Input resolution.

        depth (int): Number of blocks.

        num_heads (int): Number of attention heads.

        window_size (tuple[int]): Local window size.

        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.

        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True

        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.

        drop (float, optional): Dropout rate. Default: 0.0

        attn_drop (float, optional): Attention dropout rate. Default: 0.0

        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0

        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm

        upsample (nn.Module | None, optional): Upsample layer at the end of the layer. Default: None

        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.

    """



    def __init__(self, dim, input_resolution, depth, num_heads, window_size=(7, 7, 7),

                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,

                 drop_path=0., norm_layer=nn.LayerNorm, upsample=None, use_checkpoint=False, D_ratio=1):



        super().__init__()

        self.dim = dim

        self.input_resolution = input_resolution

        self.window_size = window_size

        self.shift_size = tuple(i // 2 for i in window_size)

        self.depth = depth

        self.use_checkpoint = use_checkpoint

        self.D_ratio = D_ratio



        # build blocks

        self.blocks = nn.ModuleList([

            SwinTransformerBlock3D(

                dim=dim,

                num_heads=num_heads,

                window_size=window_size,

                shift_size=(0, 0, 0) if (i % 2 == 0) else self.shift_size,

                mlp_ratio=mlp_ratio,

                qkv_bias=qkv_bias,

                qk_scale=qk_scale,

                drop=drop,

                attn_drop=attn_drop,

                drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,

                norm_layer=norm_layer,

                use_checkpoint=use_checkpoint,

            )

            for i in range(depth)])



        # patch merging layer

        if upsample is not None:

            self.upsample = PatchExpand_Up(input_resolution, dim=dim, dim_scale=2, norm_layer=norm_layer, D_ratio=self.D_ratio)

        else:

            self.upsample = None



    def forward(self, x, prev_v1, prev_k1, prev_q1, prev_v2, prev_k2, prev_q2):

        """ Forward function.

        Args:

            x: Input feature, tensor size (B, C, D, H, W).

        """

        # calculate attention mask for SW-MSA

        B, C, D, H, W = x.shape

        window_size, shift_size = get_window_size((D, H, W), self.window_size, self.shift_size)

        x = rearrange(x, 'b c d h w -> b d h w c')

        Dp = int(np.ceil(D / window_size[0])) * window_size[0]

        Hp = int(np.ceil(H / window_size[1])) * window_size[1]

        Wp = int(np.ceil(W / window_size[2])) * window_size[2]

        attn_mask = compute_mask(Dp, Hp, Wp, window_size, shift_size, x.device)



        for idx, blk in enumerate(self.blocks):

            if idx % 2 == 0:

                x, _, _, _ = blk(x, attn_mask, prev_v1, prev_k1, prev_q1, True)

            else:

                x, _, _, _ = blk(x, attn_mask, prev_v2, prev_k2, prev_q2, True)



        # x = x.view(B, D, H, W, -1)



        if self.upsample is not None:

            x = x.permute(0, 4, 1, 2, 3)

            x = self.upsample(x)

        # x = rearrange(x, 'b d h w c -> b c d h w')

        return x





# cache the attention mask so it is computed only once per feature-map size

@lru_cache()

def compute_mask(D, H, W, window_size, shift_size, device):

    img_mask = torch.zeros((1, D, H, W, 1), device=device)  # 1 Dp Hp Wp 1

    cnt = 0

    for d in slice(-window_size[0]), slice(-window_size[0], -shift_size[0]), slice(-shift_size[0], None):

        for h in slice(-window_size[1]), slice(-window_size[1], -shift_size[1]), slice(-shift_size[1], None):

            for w in slice(-window_size[2]), slice(-window_size[2], -shift_size[2]), slice(-shift_size[2], None):

                img_mask[:, d, h, w, :] = cnt

                cnt += 1

    mask_windows = window_partition(img_mask, window_size)  # nW, ws[0]*ws[1]*ws[2], 1

    mask_windows = mask_windows.squeeze(-1)  # nW, ws[0]*ws[1]*ws[2]

    attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)

    attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))

    return attn_mask
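

# Illustrative sketch (not part of the original model): the SW-MSA mask has one
# slice per window and one row/column per window token; zero entries mark token
# pairs that may attend to each other, -100 entries are masked out after the
# cyclic shift. The sizes below are assumptions; the helper is never called by
# the model.
def _compute_mask_example():
    mask = compute_mask(4, 14, 14, (2, 7, 7), (1, 3, 3), torch.device('cpu'))
    return mask.shape                 # (num_windows, 2*7*7, 2*7*7) = (8, 98, 98)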





class BasicLayer(nn.Module):

    """ A basic Swin Transformer layer for one stage.

    Args:

        dim (int): Number of feature channels

        depth (int): Number of blocks in this stage.

        num_heads (int): Number of attention heads.

        window_size (tuple[int]): Local window size. Default: (1,7,7).

        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.

        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: False

        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.

        drop (float, optional): Dropout rate. Default: 0.0

        attn_drop (float, optional): Attention dropout rate. Default: 0.0

        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0

        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm

        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None

    """



    def __init__(self,

                 dim,

                 depth,

                 depths,

                 num_heads,

                 window_size=(1, 7, 7),

                 mlp_ratio=4.,

                 qkv_bias=False,

                 qk_scale=None,

                 drop=0.,

                 attn_drop=0.,

                 drop_path=0.,

                 drop_path_rate=0.,

                 norm_layer=nn.LayerNorm,

                 downsample=None,

                 use_checkpoint=False):

        super().__init__()

        self.window_size = window_size

        self.shift_size = tuple(i // 2 for i in window_size)

        self.depth = depth

        self.use_checkpoint = use_checkpoint



        # build blocks

        self.blocks = nn.ModuleList([

            SwinTransformerBlock3D(

                dim=dim,

                num_heads=num_heads,

                window_size=window_size,

                shift_size=(0, 0, 0) if (i % 2 == 0) else self.shift_size,

                mlp_ratio=mlp_ratio,

                qkv_bias=qkv_bias,

                qk_scale=qk_scale,

                drop=drop,

                attn_drop=attn_drop,

                drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,

                norm_layer=norm_layer,

                use_checkpoint=use_checkpoint,

            )

            for i in range(depth)])



        self.downsample = downsample

        if self.downsample is not None:

            self.downsample = downsample(dim=dim, norm_layer=norm_layer)



    def forward(self, x, block_num):

        """ Forward function.

        Args:

            x: Input feature, tensor size (B, C, D, H, W).

        """

        # calculate attention mask for SW-MSA

        B, C, D, H, W = x.shape

        window_size, shift_size = get_window_size((D, H, W), self.window_size, self.shift_size)

        x = rearrange(x, 'b c d h w -> b d h w c')



        Dp = int(np.ceil(D / window_size[0])) * window_size[0]

        Hp = int(np.ceil(H / window_size[1])) * window_size[1]

        Wp = int(np.ceil(W / window_size[2])) * window_size[2]

        attn_mask = compute_mask(Dp, Hp, Wp, window_size, shift_size, x.device)



        v1, k1, q1, v2, k2, q2 = None, None, None, None, None, None



        for idx, blk in enumerate(self.blocks):

            if idx % 2 == 0:

                x, v1, k1, q1 = blk(x, attn_mask, None, None, None)

            else:

                x, v2, k2, q2 = blk(x, attn_mask, None, None, None)



        x = x.reshape(B, D, H, W, -1)



        if self.downsample is not None:

            x = self.downsample(x)

        x = rearrange(x, 'b d h w c -> b c d h w')



        return x, v1, k1, q1, v2, k2, q2





class PatchEmbed3D(nn.Module):

    """ Video to Patch Embedding.

    Args:

        patch_size (tuple[int]): Patch token size. Default: (4, 4, 4).

        in_chans (int): Number of input video channels. Default: 3.

        embed_dim (int): Number of linear projection output channels. Default: 96.

        norm_layer (nn.Module, optional): Normalization layer. Default: None

    """



    def __init__(self, img_size=(128, 128, 128), patch_size=(4, 4, 4), in_chans=3, embed_dim=96, norm_layer=None):

        super().__init__()

        self.patch_size = patch_size



        self.in_chans = in_chans

        self.embed_dim = embed_dim

        patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1], img_size[2] // patch_size[2]]

        self.patches_resolution = patches_resolution



        self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

        if norm_layer is not None:

            self.norm = norm_layer(embed_dim)

        else:

            self.norm = None



    def forward(self, x):

        """Forward function."""

        # padding

        _, _, D, H, W = x.size()

        if W % self.patch_size[2] != 0:

            x = F.pad(x, (0, self.patch_size[2] - W % self.patch_size[2]))

        if H % self.patch_size[1] != 0:

            x = F.pad(x, (0, 0, 0, self.patch_size[1] - H % self.patch_size[1]))

        if D % self.patch_size[0] != 0:

            x = F.pad(x, (0, 0, 0, 0, 0, self.patch_size[0] - D % self.patch_size[0]))



        x = self.proj(x)  # B C D Wh Ww

        if self.norm is not None:

            D, Wh, Ww = x.size(2), x.size(3), x.size(4)

            x = x.flatten(2).transpose(1, 2)

            x = self.norm(x)

            x = x.transpose(1, 2).view(-1, self.embed_dim, D, Wh, Ww)



        return x
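

# Illustrative sketch (not part of the original model): PatchEmbed3D turns a
# (B, C, D, H, W) volume into a (B, embed_dim, D/4, H/4, W/4) token grid with the
# default patch_size of (4, 4, 4). The sizes below are assumptions kept small on
# purpose; the helper is never called by the model.
def _patch_embed_example():
    embed = PatchEmbed3D(img_size=(32, 32, 32), patch_size=(4, 4, 4),
                         in_chans=4, embed_dim=96, norm_layer=nn.LayerNorm)
    x = torch.randn(1, 4, 32, 32, 32)
    return embed(x).shape             # (1, 96, 8, 8, 8)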





class SwinTransformerSys3D(nn.Module):

    r""" Swin Transformer

        A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows`  -

          https://arxiv.org/pdf/2103.14030

    Args:

        img_size (int | tuple(int)): Input image size. Default: (128, 128, 128)

        patch_size (int | tuple(int)): Patch size. Default: (4, 4, 4)

        in_chans (int): Number of input image channels. Default: 4

        num_classes (int): Number of classes for the segmentation head. Default: 3

        embed_dim (int): Patch embedding dimension. Default: 96

        depths (tuple(int)): Depth of each Swin Transformer layer.

        num_heads (tuple(int)): Number of attention heads in different layers.

        window_size (tuple(int)): Window size. Default: (7,7,7)

        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4

        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True

        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None

        drop_rate (float): Dropout rate. Default: 0

        attn_drop_rate (float): Attention dropout rate. Default: 0

        drop_path_rate (float): Stochastic depth rate. Default: 0.1

        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.

        patch_norm (bool): If True, add normalization after patch embedding. Default: True

        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False

    """



    def __init__(self, pretrained=None,

                 pretrained2d=True,

                 img_size=(128, 128, 128),

                 patch_size=(4, 4, 4),

                 in_chans=4,

                 num_classes=3,

                 embed_dim=96,

                 depths=[2, 2, 2, 1],

                 depths_decoder=[1, 2, 2, 2],

                 num_heads=[3, 6, 12, 24],

                 window_size=(7, 7, 7),

                 mlp_ratio=4.,

                 qkv_bias=True,

                 qk_scale=None,

                 drop_rate=0.,

                 attn_drop_rate=0.,

                 drop_path_rate=0.1,

                 norm_layer=nn.LayerNorm,

                 patch_norm=True,

                 use_checkpoint=False,

                 frozen_stages=-1,

                 final_upsample="expand_first", **kwargs):

        super().__init__()



        print(

            "SwinTransformerSys3D expand initial----depths:{};depths_decoder:{};drop_path_rate:{};num_classes:{};embed_dims:{};window:{}".format(

                depths,

                depths_decoder, drop_path_rate, num_classes, embed_dim, window_size))



        self.pretrained = pretrained

        self.pretrained2d = pretrained2d

        self.num_classes = num_classes

        self.num_layers = len(depths)

        self.embed_dim = embed_dim
        
        self.patch_size = patch_size
        self.window_size = window_size
        self.D_ratio = (img_size[1] // self.patch_size[1]) // (img_size[0] // self.patch_size[0])

        self.patch_norm = patch_norm

        self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))

        self.num_features_up = int(embed_dim * 2)

        self.mlp_ratio = mlp_ratio

        self.final_upsample = final_upsample

        self.frozen_stages = frozen_stages



        # split image into non-overlapping patches

        self.patch_embed = PatchEmbed3D(img_size=img_size,

                                        patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,

                                        norm_layer=norm_layer if self.patch_norm else None)



        patches_resolution = self.patch_embed.patches_resolution

        self.patches_resolution = patches_resolution



        self.pos_drop = nn.Dropout(p=drop_rate)



        # stochastic depth

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule



        # build encoder and bottleneck layers

        self.layers = nn.ModuleList()

        for i_layer in range(self.num_layers):

            layer = BasicLayer(

                dim=int(embed_dim * 2 ** i_layer),

                depth=depths[i_layer],

                depths=depths,

                num_heads=num_heads[i_layer],

                window_size=window_size,

                mlp_ratio=mlp_ratio,

                qkv_bias=qkv_bias,

                qk_scale=qk_scale,

                drop=drop_rate,

                attn_drop=attn_drop_rate,

                drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],

                drop_path_rate=drop_path_rate,

                norm_layer=norm_layer,

                downsample=PatchMerging if i_layer < self.num_layers - 1 else None,

                use_checkpoint=use_checkpoint)

            self.layers.append(layer)



        # build decoder layers

        self.layers_up = nn.ModuleList()

        self.concat_back_dim = nn.ModuleList()

        for i_layer in range(self.num_layers):

            concat_linear = nn.Linear(2 * int(embed_dim * 2 ** (self.num_layers - 1 - i_layer)),

                                      int(embed_dim * 2 ** (

                                              self.num_layers - 1 - i_layer)),

                                      bias=False) if i_layer > 0 else nn.Identity()

            if i_layer == 0:

                layer_up = PatchExpand(

                    input_resolution=(patches_resolution[0] // (2 ** (self.num_layers - 1 - i_layer)),

                                      patches_resolution[1] // (2 ** (self.num_layers - 1 - i_layer)),

                                      patches_resolution[2] // (2 ** (self.num_layers - 1 - i_layer))),

                    dim=int(embed_dim * 2 ** (self.num_layers - 1 - i_layer)), dim_scale=2, norm_layer=norm_layer)

            else:

                layer_up = BasicLayer_up(

                    dim=int(embed_dim * 2 ** (self.num_layers - 1 - i_layer)),

                    input_resolution=(patches_resolution[0] // (2 ** (self.num_layers - 1 - i_layer)),

                                      patches_resolution[1] // (2 ** (self.num_layers - 1 - i_layer)),

                                      patches_resolution[2] // (2 ** (self.num_layers - 1 - i_layer))),

                    depth=depths[(self.num_layers - 1 - i_layer)],

                    num_heads=num_heads[(self.num_layers - 1 - i_layer)],

                    window_size=window_size,

                    mlp_ratio=mlp_ratio,

                    qkv_bias=qkv_bias,

                    qk_scale=qk_scale,

                    drop=drop_rate,

                    attn_drop=attn_drop_rate,

                    drop_path=dpr[sum(depths[:(self.num_layers - 1 - i_layer)]):sum(

                        depths[:(self.num_layers - 1 - i_layer) + 1])],

                    norm_layer=norm_layer,

                    upsample=PatchExpand if (i_layer < self.num_layers - 1) else None,

                    use_checkpoint=use_checkpoint,
                    D_ratio = self.D_ratio
                    )



            self.layers_up.append(layer_up)

            self.concat_back_dim.append(concat_linear)



        self.norm = norm_layer(self.num_features)

        self.norm_up = norm_layer(self.embed_dim)



        if self.final_upsample == "expand_first":

            print("---final upsample expand_first---")

            self.up = FinalPatchExpand_X4(input_resolution=(

                img_size[0] // patch_size[0], img_size[1] // patch_size[1], img_size[2] // patch_size[2]),

                dim_scale=self.patch_size, dim=embed_dim)

            self.output = nn.Conv3d(in_channels=embed_dim, out_channels=self.num_classes, kernel_size=1, bias=False)



        self._freeze_stages()



    @torch.jit.ignore

    def no_weight_decay(self):

        return {'absolute_pos_embed'}



    @torch.jit.ignore

    def no_weight_decay_keywords(self):

        return {'relative_position_bias_table'}



    # Encoder and Bottleneck

    def forward_features(self, x):

        x = self.patch_embed(x)

        x = self.pos_drop(x)

        x_downsample = []

        v_values_1 = []

        k_values_1 = []

        q_values_1 = []

        v_values_2 = []

        k_values_2 = []

        q_values_2 = []



        for i, layer in enumerate(self.layers):

            x_downsample.append(x)

            x, v1, k1, q1, v2, k2, q2 = layer(x, i)

            v_values_1.append(v1)

            k_values_1.append(k1)

            q_values_1.append(q1)

            v_values_2.append(v2)

            k_values_2.append(k2)

            q_values_2.append(q2)



        x = rearrange(x, 'n c d h w -> n d h w c')

        x = self.norm(x)

        x = rearrange(x, 'n d h w c -> n c d h w')



        return x, x_downsample, v_values_1, k_values_1, q_values_1, v_values_2, k_values_2, q_values_2



    # Decoder and skip connections

    def forward_up_features(self, x, x_downsample, v_values_1, k_values_1, q_values_1, v_values_2, k_values_2,

                            q_values_2):

        for inx, layer_up in enumerate(self.layers_up):

            if inx == 0:

                x = layer_up(x)

            else:

                x = torch.cat([x, x_downsample[3 - inx]], 1)

                B, C, D, H, W = x.shape

                x = x.flatten(2).transpose(1, 2)

                x = self.concat_back_dim[inx](x)

                _, _, C = x.shape

                x = x.view(B, D, H, W, C)



                x = x.permute(0, 4, 1, 2, 3)

                x = layer_up(x, v_values_1[3 - inx], k_values_1[3 - inx], q_values_1[3 - inx], v_values_2[3 - inx],

                             k_values_2[3 - inx], q_values_2[3 - inx])



        x = self.norm_up(x)



        return x



    def up_x4(self, x):

        D, H, W = self.patches_resolution

        B, _, _, _, C = x.shape



        if self.final_upsample == "expand_first":

            x = self.up(x)

            x = x.view(B, self.patch_size[0] * D, self.patch_size[1] * H, self.patch_size[2] * W, -1)

            x = x.permute(0, 4, 1, 2, 3)  # B,C,D,H,W

            x = self.output(x)



        return x



    def _freeze_stages(self):

        if self.frozen_stages >= 0:

            self.patch_embed.eval()

            for param in self.patch_embed.parameters():

                param.requires_grad = False



        if self.frozen_stages >= 1:

            self.pos_drop.eval()

            for i in range(0, self.frozen_stages):

                m = self.layers[i]

                m.eval()

                for param in m.parameters():

                    param.requires_grad = False



    def inflate_weights(self):

        """Inflate the swin2d parameters to swin3d.

        The differences between swin3d and swin2d mainly lie in an extra

        axis. To utilize the pretrained parameters in 2d model,

        the weights of the swin2d models should be inflated to fit the shapes of

        the 3d counterpart.

        Args:

            logger (logging.Logger): The logger used to print

                debugging information.

        """

        checkpoint = torch.load(self.pretrained, map_location='cpu')

        state_dict = checkpoint['model']



        # delete relative_position_index since we always re-init it

        relative_position_index_keys = [k for k in state_dict.keys() if "relative_position_index" in k]

        for k in relative_position_index_keys:

            del state_dict[k]



        # delete attn_mask since we always re-init it

        attn_mask_keys = [k for k in state_dict.keys() if "attn_mask" in k]

        for k in attn_mask_keys:

            del state_dict[k]



        state_dict['patch_embed.proj.weight'] = state_dict['patch_embed.proj.weight'].unsqueeze(2).repeat(1, 1,self.patch_size[0], 1, 1) / self.patch_size[0]



        # bicubic interpolate relative_position_bias_table if not match

        relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k]

        for k in relative_position_bias_table_keys:

            relative_position_bias_table_pretrained = state_dict[k]

            relative_position_bias_table_current = self.state_dict()[k]

            L1, nH1 = relative_position_bias_table_pretrained.size()

            L2, nH2 = relative_position_bias_table_current.size()

            L2 = (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1)

            wd = self.window_size[0]

            if nH1 != nH2:

                print(f"Error in loading {k}, passing")

            else:

                if L1 != L2:

                    S1 = int(L1 ** 0.5)

                    relative_position_bias_table_pretrained_resized = torch.nn.functional.interpolate(

                        relative_position_bias_table_pretrained.permute(1, 0).view(1, nH1, S1, S1),

                        size=(2 * self.window_size[1] - 1, 2 * self.window_size[2] - 1),

                        mode='bicubic')

                    relative_position_bias_table_pretrained = relative_position_bias_table_pretrained_resized.view(nH2, L2).permute(1, 0)

            state_dict[k] = relative_position_bias_table_pretrained.repeat(2 * wd - 1, 1)



        msg = self.load_state_dict(state_dict, strict=False)

        print(msg)

        print(f"=> loaded successfully '{self.pretrained}'")

        del checkpoint

        torch.cuda.empty_cache()



    def init_weights(self, pretrained=None):

        """Initialize the weights in backbone.

        Args:

            pretrained (str, optional): Path to pre-trained weights.

                Defaults to None.

        """



        def _init_weights(m):

            if isinstance(m, nn.Linear):

                trunc_normal_(m.weight, std=.02)

                if isinstance(m, nn.Linear) and m.bias is not None:

                    nn.init.constant_(m.bias, 0)

            elif isinstance(m, nn.LayerNorm):

                nn.init.constant_(m.bias, 0)

                nn.init.constant_(m.weight, 1.0)



        if pretrained:

            self.pretrained = pretrained

        if isinstance(self.pretrained, str):

            self.apply(_init_weights)



            print(f'load model from: {self.pretrained}')



            if self.pretrained2d:

                # Inflate 2D model into 3D model.

                self.inflate_weights()

            else:

                # Directly load 3D model.

                load_checkpoint(self, self.pretrained, strict=False)

        elif self.pretrained is None:

            self.apply(_init_weights)

        else:

            raise TypeError('pretrained must be a str or None')



    def forward(self, x):

        x, x_downsample, v_values_1, k_values_1, q_values_1, v_values_2, k_values_2, q_values_2 = self.forward_features(x)

        x = self.forward_up_features(x, x_downsample, v_values_1, k_values_1, q_values_1, v_values_2, k_values_2, q_values_2)

        x = self.up_x4(x)



        return x
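

# Illustrative smoke test (not part of the original model): build the full network
# with its default configuration and count its parameters. A forward pass would
# expect a (B, in_chans, 128, 128, 128) volume; this sketch only instantiates the
# model, so it stays cheap to run.
if __name__ == "__main__":
    model = SwinTransformerSys3D(img_size=(128, 128, 128), patch_size=(4, 4, 4),
                                 in_chans=4, num_classes=3, embed_dim=96,
                                 depths=[2, 2, 2, 1], num_heads=[3, 6, 12, 24],
                                 window_size=(7, 7, 7))
    n_params = sum(p.numel() for p in model.parameters())
    print(f"SwinTransformerSys3D parameters: {n_params / 1e6:.1f}M")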
