import math

import torch
import torch.utils.checkpoint as checkpoint
from einops import rearrange
from torch import nn

from SwinEncoder import (SwinTransformerBlock)


class PatchExpand(nn.Module):
    """Double the spatial resolution of a token sequence (pixel-shuffle style).

    Each token is projected from ``dim`` to ``4 * dim`` channels, and those
    channels are then redistributed over a 2x2 spatial neighbourhood, so an
    (H, W) grid of ``dim``-channel tokens becomes a (2H, 2W) grid of
    ``dim``-channel tokens.

    Args:
        input_resolution (tuple[int, int]): (H, W) of the incoming token grid.
        dim (int): Number of input channels per token.
        dim_scale (int): Only ``2`` enables the linear projection; any other
            value leaves tokens unprojected (``nn.Identity``).
        norm_layer (nn.Module): Normalization applied to the output tokens.
    """

    def __init__(self, input_resolution, dim, dim_scale=2, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.expand = nn.Linear(dim, 4 * dim, bias=False) if dim_scale == 2 else nn.Identity()
        self.norm = norm_layer(dim)

    def forward(self, x):
        """Map (B, H*W, C) tokens to (B, 2H*2W, C_out) where C_out = 4C // 4."""
        height, width = self.input_resolution
        x = self.expand(x)
        batch, n_tokens, channels = x.shape
        assert n_tokens == height * width, "input feature has wrong size"

        out_channels = channels // 4
        # Equivalent to einops 'b h w (p1 p2 c) -> b (h p1) (w p2) c' with
        # p1 = p2 = 2: split the channel axis into (2, 2, c), interleave the
        # two factors into the spatial axes, then flatten back to tokens.
        x = x.view(batch, height, width, 2, 2, out_channels)
        x = x.permute(0, 1, 3, 2, 4, 5).contiguous()
        x = x.view(batch, height * 2 * width * 2, out_channels)
        return self.norm(x)


class FinalPatchExpand_X4(nn.Module):
    """Upsample a token grid by 4x in each spatial direction.

    Tokens are projected from ``dim`` to ``16 * dim`` channels; the expanded
    channels are redistributed over a ``dim_scale`` x ``dim_scale`` spatial
    neighbourhood, leaving ``dim`` channels per output token.

    Args:
        input_resolution (tuple[int, int]): (H, W) of the incoming token grid.
        dim (int): Number of input channels per token (also the output width).
        dim_scale (int): Spatial upsampling factor per axis. Default: 4.
        norm_layer (nn.Module): Normalization applied to the output tokens.
    """

    def __init__(self, input_resolution, dim, dim_scale=4, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.dim_scale = dim_scale
        self.expand = nn.Linear(dim, 16 * dim, bias=False)
        self.output_dim = dim
        self.norm = norm_layer(self.output_dim)

    def forward(self, x):
        """Map (B, H*W, C) tokens to (B, s*H * s*W, output_dim), s = dim_scale."""
        height, width = self.input_resolution
        x = self.expand(x)
        batch, n_tokens, channels = x.shape
        assert n_tokens == height * width, "input feature has wrong size"

        scale = self.dim_scale
        sub_channels = channels // (scale * scale)
        # Equivalent to einops 'b h w (p1 p2 c) -> b (h p1) (w p2) c' with
        # p1 = p2 = dim_scale, then flattening the spatial axes back to tokens.
        x = x.view(batch, height, width, scale, scale, sub_channels)
        x = x.permute(0, 1, 3, 2, 4, 5).contiguous()
        x = x.view(batch, -1, self.output_dim)
        return self.norm(x)


class BasicLayer_up(nn.Module):
    """A basic Swin Transformer layer for one decoder (upsampling) stage.

    Runs ``depth`` SwinTransformerBlocks — alternating between regular and
    shifted windows — and, if requested, finishes with a 2x PatchExpand.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        upsample (type | None, optional): When not None, a 2x PatchExpand is
            appended at the end of the stage. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """

    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, upsample=None, use_checkpoint=False):

        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # Even-indexed blocks use regular windows; odd-indexed blocks shift
        # the windows by half to let information flow across window borders.
        blocks = []
        for idx in range(depth):
            block_drop_path = drop_path[idx] if isinstance(drop_path, list) else drop_path
            blocks.append(
                SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
                                     num_heads=num_heads, window_size=window_size,
                                     shift_size=0 if idx % 2 == 0 else window_size // 2,
                                     mlp_ratio=mlp_ratio,
                                     qkv_bias=qkv_bias, qk_scale=qk_scale,
                                     drop=drop, attn_drop=attn_drop,
                                     drop_path=block_drop_path,
                                     norm_layer=norm_layer))
        self.blocks = nn.ModuleList(blocks)

        # Optional 2x spatial upsampling at the end of the stage.
        if upsample is None:
            self.upsample = None
        else:
            self.upsample = PatchExpand(input_resolution, dim=dim, dim_scale=2,
                                        norm_layer=norm_layer)

    def forward(self, x):
        """Apply all attention blocks, then the optional upsample."""
        for blk in self.blocks:
            # Checkpointing trades compute for memory by re-running the block
            # during the backward pass instead of storing activations.
            x = checkpoint.checkpoint(blk, x) if self.use_checkpoint else blk(x)
        if self.upsample is not None:
            x = self.upsample(x)
        return x


class SwinDecoder(nn.Module):
    """Swin Transformer decoder head.

    Progressively upsamples the high-level (ASPP) features through Swin
    attention stages, fuses them with the low-level features by channel
    concatenation, runs further attention stages on the fused tokens, applies
    a final 2x expand, and projects to ``num_classes`` maps with a 1x1 conv.

    Args:
        low_level_idx (int): Pyramid index of the low-level feature map.
        high_level_idx (int): Pyramid index of the high-level feature map.
        input_size (int): Spatial side length of the high-level feature map.
        input_dim (int): Channel count shared by both input feature maps.
        num_classes (int): Number of output channels/classes.
        depth (int): Blocks per upsampling stage before fusion.
        last_layer_depth (int): Blocks per stage after fusion.
        num_heads, window_size, mlp_ratio, qkv_bias, qk_scale, drop_rate,
        attn_drop_rate, drop_path_rate, norm_layer: forwarded to
            BasicLayer_up / SwinTransformerBlock.
        decoder_norm (bool): If True, apply ``norm_layer`` before the final expand.
        use_checkpoint (bool): Use activation checkpointing inside the stages.
    """

    def __init__(self, low_level_idx, high_level_idx,
                 input_size, input_dim, num_classes,
                 depth, last_layer_depth, num_heads, window_size, mlp_ratio, qkv_bias, qk_scale,
                 drop_rate, attn_drop_rate, drop_path_rate, norm_layer, decoder_norm, use_checkpoint):
        super().__init__()
        self.low_level_idx = low_level_idx
        self.high_level_idx = high_level_idx

        # Each stage doubles the spatial resolution; track the pyramid level
        # explicitly instead of leaking the loop variable (the original code
        # raised NameError when high_level_idx == low_level_idx).
        n_up = high_level_idx - low_level_idx

        # Stages that upsample the ASPP features before fusion (levels 0..n_up-1).
        self.layers_up = nn.ModuleList()
        for level in range(n_up):
            side = input_size * 2 ** level
            self.layers_up.append(
                BasicLayer_up(dim=int(input_dim),
                              input_resolution=(side, side),
                              depth=depth,
                              num_heads=num_heads,
                              window_size=window_size,
                              mlp_ratio=mlp_ratio,
                              qkv_bias=qkv_bias, qk_scale=qk_scale,
                              drop=drop_rate, attn_drop=attn_drop_rate,
                              drop_path=drop_path_rate,
                              norm_layer=norm_layer,
                              upsample=PatchExpand,
                              use_checkpoint=use_checkpoint))

        # Stages after fusion operate on 2x channels (concatenated features),
        # at levels n_up .. n_up + low_level_idx.
        self.last_layers_up = nn.ModuleList()
        for offset in range(low_level_idx + 1):
            side = input_size * 2 ** (n_up + offset)
            self.last_layers_up.append(
                BasicLayer_up(dim=int(input_dim) * 2,
                              input_resolution=(side, side),
                              depth=last_layer_depth,
                              num_heads=num_heads,
                              window_size=window_size,
                              mlp_ratio=mlp_ratio,
                              qkv_bias=qkv_bias, qk_scale=qk_scale,
                              drop=drop_rate, attn_drop=attn_drop_rate,
                              drop_path=0.0,  # no stochastic depth near the output
                              norm_layer=norm_layer,
                              upsample=PatchExpand,
                              use_checkpoint=use_checkpoint))

        # Final 2x expansion at the level above the last fused stage.
        final_side = input_size * 2 ** (n_up + low_level_idx + 1)
        self.final_up = PatchExpand(input_resolution=(final_side, final_side),
                                    dim=int(input_dim) * 2,
                                    dim_scale=2,
                                    norm_layer=norm_layer)

        if decoder_norm:
            self.norm_up = norm_layer(int(input_dim) * 2)
        else:
            self.norm_up = None
        self.output = nn.Conv2d(int(input_dim) * 2, num_classes, kernel_size=1, bias=False)

    def forward(self, low_level, aspp):
        """Fuse low- and high-level features and decode to class maps.

        Args:
            low_level: (B, Hl, Wl, C) low-level feature map.
            aspp: (B, Ha, Wa, C) high-level (ASPP) feature map.

        Returns:
            (B, num_classes, H, W) logits.
        """
        B, Hl, Wl, C = low_level.shape
        _, Ha, Wa, _ = aspp.shape

        # Flatten both maps into token sequences.
        low_level = low_level.view(B, Hl * Wl, C)
        aspp = aspp.view(B, Ha * Wa, C)

        # Upsample ASPP tokens (with Swin attention refinement at each level)
        # until they reach the low-level resolution.
        for layer in self.layers_up:
            aspp = layer(aspp)

        # Fuse by channel concatenation -> 2C channels per token.
        x = torch.cat([low_level, aspp], dim=-1)

        for layer in self.last_layers_up:
            x = layer(x)

        if self.norm_up is not None:
            x = self.norm_up(x)

        x = self.final_up(x)

        # Tokens back to a square map (assumes H == W), then NCHW for the conv.
        B, L, C = x.shape
        H = W = int(math.sqrt(L))
        x = x.view(B, H, W, C)
        x = x.permute(0, 3, 1, 2).contiguous()
        x = self.output(x)

        return x


if __name__ == '__main__':
    # Smoke test: 56x56 low-level features and 14x14 ASPP features,
    # both with 96 channels.
    low_level = torch.rand(1, 56, 56, 96)
    aspp = torch.rand(1, 14, 14, 96)

    decoder = SwinDecoder(
        low_level_idx=0,       # example value; set according to the actual backbone
        high_level_idx=2,      # example value; set according to the actual backbone
        input_size=14,
        input_dim=96,
        num_classes=1,         # single output channel (e.g. binary segmentation)
        depth=3,               # example value; tune for the actual model
        last_layer_depth=6,
        num_heads=3,
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.1,
        norm_layer=nn.LayerNorm,
        decoder_norm=True,
        use_checkpoint=False,
    )

    output = decoder(low_level, aspp)
    print(output.shape)
