import math

import torch
import torch.nn as nn


def window_partition(x, window_size):
    """Split a feature map into non-overlapping windows.

    Args:
        x: input feature map of shape (B, H, W, C)
        window_size: window size as (window_h, window_w)
    Returns:
        windows: tensor of shape (num_windows*B, window_h, window_w, C)
    """
    batch, height, width, channels = x.shape
    win_h, win_w = window_size
    # Factor each spatial dim into (num_windows, window_extent).
    grid = x.view(batch, height // win_h, win_h, width // win_w, win_w, channels)
    # Bring the two in-window extents next to each other, then collapse the
    # leading (batch, grid_h, grid_w) axes into a single window-batch axis.
    grid = grid.permute(0, 1, 3, 2, 4, 5).contiguous()
    return grid.view(-1, win_h, win_w, channels)


def window_reverse(windows, window_size, H, W):
    """Reassemble windows into a feature map (inverse of window_partition).

    Args:
        windows: tensor of shape (num_windows*B, window_h, window_w, C)
        window_size: window size as (window_h, window_w)
        H: output feature-map height
        W: output feature-map width
    Returns:
        x: tensor of shape (B, H, W, C)
    """
    win_h, win_w = window_size
    channels = windows.shape[-1]
    # Unfold the window-batch axis back into (batch, grid_h, grid_w).
    x = windows.view(-1, H // win_h, W // win_w, win_h, win_w, channels)
    # Interleave grid and in-window axes, then merge them into H and W.
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous()
    return x.view(-1, H, W, channels)


def get_relative_position_index(win_h: int, win_w: int):
    """Build the pairwise relative-position index for tokens inside a window.

    Each entry indexes into a flat bias table covering all
    (2*win_h - 1) * (2*win_w - 1) possible relative offsets.

    Args:
        win_h: window height
        win_w: window width
    Returns:
        LongTensor of shape (win_h*win_w, win_h*win_w)
    """
    # Absolute (row, col) coordinates of every token in the window.
    ys, xs = torch.meshgrid(torch.arange(win_h), torch.arange(win_w), indexing='ij')
    flat = torch.stack((ys, xs)).flatten(1)  # 2, Wh*Ww
    # Pairwise coordinate differences: rel[i, j] = coord[i] - coord[j].
    rel = (flat[:, :, None] - flat[:, None, :]).permute(1, 2, 0).contiguous()
    # Shift both axes so the smallest offset maps to 0, then linearize
    # row-major over the (2*win_h-1) x (2*win_w-1) offset grid.
    rel[:, :, 0] += win_h - 1
    rel[:, :, 1] += win_w - 1
    rel[:, :, 0] *= 2 * win_w - 1
    return rel.sum(-1)


class WindowAttention(nn.Module):
    """Window-based multi-head self-attention with learned relative position bias."""

    def __init__(
            self,
            dim,  # input feature dimension
            window_size,  # window size (window_h, window_w)
            num_heads=4  # number of attention heads
    ):
        super().__init__()
        self.window_size = window_size
        self.window_area = self.window_size[0] * self.window_size[1]
        self.num_heads = num_heads
        head_dim = dim // self.num_heads
        self.scale = head_dim ** -0.5  # 1/sqrt(head_dim) attention scaling

        # One learnable bias per head and per relative offset. There are
        # (2*Wh - 1) * (2*Ww - 1) distinct offsets; the previous size of
        # (2*Wh - 1)**2 was only correct for square windows.
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), self.num_heads))

        # Precomputed relative-offset index for every token pair in a window.
        self.register_buffer("relative_position_index",
                             get_relative_position_index(self.window_size[0], self.window_size[1]),
                             persistent=False)

        # Fused query/key/value projection.
        self.qkv = nn.Linear(dim, dim * 3)
        # Output projection.
        self.proj = nn.Linear(dim, dim)

        # Initialize the bias table with a truncated normal distribution.
        torch.nn.init.trunc_normal_(self.relative_position_bias_table, std=.02)
        self.softmax = nn.Softmax(dim=-1)

    def _get_rel_pos_bias(self):
        """Look up the relative position bias, shaped (1, num_heads, N, N)."""
        relative_position_bias = self.relative_position_bias_table[
            self.relative_position_index.view(-1)].view(self.window_area, self.window_area, -1)
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
        return relative_position_bias.unsqueeze(0)

    def forward(self, x, mask=None):
        """
        Args:
            x: input features, shape (num_windows*B, N, C) with N = window area
            mask: optional additive attention mask (0 / -inf style),
                shape (num_windows, Wh*Ww, Wh*Ww)
        Returns:
            attended features, shape (num_windows*B, N, C)
        """
        B_, N, C = x.shape
        # Project to q, k, v: (3, B_, num_heads, N, head_dim).
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # separate q, k, v

        # Scaled dot-product scores plus relative position bias.
        q = q * self.scale
        attn = q @ k.transpose(-2, -1)
        attn = attn + self._get_rel_pos_bias()

        # Apply the shifted-window mask, broadcast over batch and heads.
        if mask is not None:
            num_win = mask.shape[0]
            attn = attn.view(-1, num_win, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)

        # Softmax normalization over keys.
        attn = self.softmax(attn)

        # Attention-weighted sum of values; merge heads back into channels.
        x = attn @ v
        x = x.transpose(1, 2).reshape(B_, N, -1)
        x = self.proj(x)
        return x


class SwinTransformerBlock(nn.Module):
    """Swin Transformer block: (shifted-)window attention followed by an FFN."""

    def __init__(
            self, dim, input_resolution, window_size=7, shift_size=0):
        """
        Args:
            dim: input feature dimension
            input_resolution: input resolution (H, W)
            window_size: window size (int, applied to both axes)
            shift_size: cyclic window shift (int, 0 disables shifting)
        """
        super().__init__()
        self.input_resolution = input_resolution
        window_size = (window_size, window_size)
        shift_size = (shift_size, shift_size)
        self.window_size = window_size
        self.shift_size = shift_size
        # BUGFIX: the original used `if self.shift_size:` — but shift_size is a
        # tuple, and (0, 0) is truthy, so non-shifted blocks still built a
        # (useless, all-zero) attention mask and performed no-op torch.roll
        # calls. Test the actual shift values instead; outputs are unchanged.
        self.has_shift = any(s > 0 for s in shift_size)
        self.window_area = self.window_size[0] * self.window_size[1]

        # Pre-attention normalization.
        self.norm1 = nn.LayerNorm(dim)
        # Window self-attention layer.
        self.attn = WindowAttention(
            dim,
            window_size=self.window_size,
        )

        self.norm2 = nn.LayerNorm(dim)
        # Feed-forward network (note: has an extra LayerNorm on the hidden layer).
        self.mlp = nn.Sequential(
            nn.Linear(dim, 4 * dim),
            nn.GELU(),
            nn.LayerNorm(4 * dim),
            nn.Linear(4 * dim, dim)
        )

        # For shifted windows, precompute the mask that stops tokens wrapped
        # around by the cyclic shift from attending to each other.
        if self.has_shift:
            H, W = self.input_resolution
            # Round the resolution up to a multiple of the window size.
            H = math.ceil(H / self.window_size[0]) * self.window_size[0]
            W = math.ceil(W / self.window_size[1]) * self.window_size[1]
            img_mask = torch.zeros((1, H, W, 1))
            cnt = 0
            # Assign a distinct region id to each of the 9 shift regions.
            for h in (
                    slice(0, -self.window_size[0]),
                    slice(-self.window_size[0], -self.shift_size[0]),
                    slice(-self.shift_size[0], None)):
                for w in (
                        slice(0, -self.window_size[1]),
                        slice(-self.window_size[1], -self.shift_size[1]),
                        slice(-self.shift_size[1], None)):
                    img_mask[:, h, w, :] = cnt
                    cnt += 1
            mask_windows = window_partition(img_mask, self.window_size)
            mask_windows = mask_windows.view(-1, self.window_area)
            # Token pairs from different regions get -100 (≈ -inf pre-softmax).
            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
            attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        else:
            attn_mask = None

        self.register_buffer("attn_mask", attn_mask, persistent=False)

    def _attn(self, x):
        """Run windowed attention on a (B, H, W, C) feature map."""
        B, H, W, C = x.shape

        # Cyclic shift (shifted-window blocks only).
        if self.has_shift:
            shifted_x = torch.roll(x, shifts=(-self.shift_size[0], -self.shift_size[1]), dims=(1, 2))
        else:
            shifted_x = x

        # Partition into windows and flatten each window to a token sequence.
        x_windows = window_partition(shifted_x, self.window_size)
        x_windows = x_windows.view(-1, self.window_area, C)

        # Window attention (attn_mask is None for non-shifted blocks).
        attn_windows = self.attn(x_windows, mask=self.attn_mask)

        # Merge windows back into a feature map.
        attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W)
        shifted_x = shifted_x[:, :H, :W, :].contiguous()

        # Reverse the cyclic shift.
        if self.has_shift:
            x = torch.roll(shifted_x, shifts=self.shift_size, dims=(1, 2))
        else:
            x = shifted_x
        return x

    def forward(self, x):
        """x: (B, H, W, C) -> (B, H, W, C), attention and MLP residual branches."""
        B, H, W, C = x.shape
        # Attention with residual connection.
        x = x + self._attn(self.norm1(x))
        # MLP with residual connection (applied on flattened token sequence).
        x = x.reshape(B, -1, C)
        x = x + self.mlp(self.norm2(x))
        x = x.reshape(B, H, W, C)
        return x


class PatchEmbedding(nn.Module):
    """图像补丁嵌入层"""

    def __init__(self, in_ch, num_feat, patch_size):
        """
        Args:
            in_ch: 输入通道数
            num_feat: 输出特征维度
            patch_size: 补丁大小
        """
        super().__init__()
        self.conv = nn.Conv2d(in_ch, num_feat, kernel_size=patch_size, stride=patch_size)

    def forward(self, X):
        # 输出shape: (batch_size, H, W, C)
        return self.conv(X).permute(0, 2, 3, 1)  # 没打平没layermorm


class PatchMerging(nn.Module):
    """Downsampling layer: merges each 2x2 neighborhood into a single token.

    The four neighbors are concatenated channel-wise (4C), normalized, and
    linearly projected down to 2C.
    """

    def __init__(self, dim):
        super().__init__()
        self.norm = nn.LayerNorm(4 * dim)
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)

    def forward(self, x):
        """x: (B, H, W, C) -> (B, H/2, W/2, 2C); H and W must be even."""
        batch, height, width, channels = x.shape
        # Split H and W into (half, 2) factors ...
        pairs = x.reshape(batch, height // 2, 2, width // 2, 2, channels)
        # ... and fold the 2x2 neighborhood into the channel dimension.
        merged = pairs.permute(0, 1, 3, 4, 2, 5).flatten(3)
        merged = self.norm(merged)
        return self.reduction(merged)


class PatchExpansion(nn.Module):
    """特征图上采样层，将1个位置扩展为2x2区域"""

    def __init__(self, dim):
        super().__init__()
        self.norm = nn.LayerNorm(dim // 2)
        self.expand = nn.Linear(dim, 2 * dim, bias=False)

    def forward(self, x):
        """
        x: (B, H, W, C) -> (B, 2H, 2W, C/2)
        """
        x = self.expand(x)
        B, H, W, C = x.shape

        x = x.view(B, H, W, 2, 2, C // 4)
        x = x.permute(0, 1, 3, 2, 4, 5)
        x = x.reshape(B, H * 2, W * 2, C // 4)
        x = self.norm(x)
        return x


# class FinalPatchExpansion(nn.Module):
#     """最终的特征图上采样层，将1个位置扩展为4x4区域"""
#
#     def __init__(self, dim):
#         super().__init__()
#         self.norm = nn.LayerNorm(dim)
#         self.expand = nn.Linear(dim, 16 * dim, bias=False)
#
#     def forward(self, x):
#         """
#         x: (B, H, W, C) -> (B, 4H, 4W, C/16)
#         """
#         x = self.expand(x)
#         B, H, W, C = x.shape
#
#         x = x.view(B, H, W, 4, 4, C // 16)
#         x = x.permute(0, 1, 3, 2, 4, 5)
#         x = x.reshape(B, H * 4, W * 4, C // 16)
#         x = self.norm(x)
#         return x
class FinalPatchExpansion(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.conv = nn.Conv2d(dim, dim, 3, padding=1)

        # 第一次上采样模块
        self.up1 = nn.Sequential(
            nn.ConvTranspose2d(dim, dim // 2, 2, stride=2),
            nn.BatchNorm2d(dim // 2),
            nn.ReLU(inplace=True),
            nn.Conv2d(dim // 2, dim // 2, 3, padding=1)  # 额外的卷积
        )

        # 第二次上采样模块
        self.up2 = nn.Sequential(
            nn.ConvTranspose2d(dim // 2, dim // 8, 2, stride=2),
            nn.BatchNorm2d(dim // 8),
            nn.ReLU(inplace=True),
            nn.Conv2d(dim // 8, dim // 8, 3, padding=1)  # 额外的卷积
        )

        self.norm = nn.LayerNorm(dim // 8)

    def forward(self, x):
        # x: (B, H, W, C)
        x = x.permute(0, 3, 1, 2)

        identity = x
        x = self.conv(x)
        x = x + identity  # 残差连接

        # 分步上采样
        x = self.up1(x)
        x = self.up2(x)

        x = x.permute(0, 2, 3, 1)
        x = self.norm(x)

        return x


class SwinBlock(nn.Module):
    """Pair of Swin Transformer blocks.

    The first block attends within regular windows; the second shifts the
    window grid so information can flow across window boundaries.
    """

    def __init__(self, dims, ip_res, ss_size=3):
        """
        Args:
            dims: input feature dimension
            ip_res: input resolution (H, W)
            ss_size: shift size for the second (shifted-window) block
        """
        super().__init__()
        self.swtb1 = SwinTransformerBlock(dim=dims, input_resolution=ip_res)
        self.swtb2 = SwinTransformerBlock(dim=dims, input_resolution=ip_res, shift_size=ss_size)

    def forward(self, x):
        """Apply the regular-window block, then the shifted-window block."""
        x = self.swtb1(x)
        x = self.swtb2(x)
        return x


class Encoder(nn.Module):
    """Encoder of the Swin-UNet: alternating feature stages and downsampling.

    Stage i processes C * 2**i channels at resolution (H / 2**i, W / 2**i);
    PatchMerging then halves the resolution and doubles the channels.
    """

    def __init__(self, C, partioned_ip_res, num_blocks=3):
        """
        Args:
            C: base feature dimension
            partioned_ip_res: input resolution after patch partition (H, W)
            num_blocks: number of encoder stages. Previously accepted but
                ignored (stages were hard-coded at 3); now honoured. The
                default of 3 reproduces the original structure exactly.
        """
        super().__init__()
        H, W = partioned_ip_res[0], partioned_ip_res[1]
        # Swin Transformer feature stages: C, 2C, 4C, ... channels.
        self.enc_swin_blocks = nn.ModuleList([
            SwinBlock(C * 2 ** i, (H // 2 ** i, W // 2 ** i))
            for i in range(num_blocks)
        ])
        # Downsampling after each stage: C*2**i -> C*2**(i+1).
        self.enc_patch_merge_blocks = nn.ModuleList([
            PatchMerging(C * 2 ** i)
            for i in range(num_blocks)
        ])

    def forward(self, x):
        """Run all stages, collecting per-stage features for skip connections.

        Returns:
            (downsampled bottleneck input, [stage features, shallow -> deep])
        """
        skip_conn_ftrs = []
        for swin_block, patch_merger in zip(self.enc_swin_blocks, self.enc_patch_merge_blocks):
            x = swin_block(x)  # feature processing
            skip_conn_ftrs.append(x)  # saved for the decoder's skip connections
            x = patch_merger(x)  # downsample
        return x, skip_conn_ftrs


class Decoder(nn.Module):
    """Decoder of the Swin-UNet: upsampling, skip-feature fusion, and stages."""

    def __init__(self, C, partioned_ip_res, num_blocks=3):
        """
        Args:
            C: base feature dimension
            partioned_ip_res: input resolution after patch partition (H, W)
            num_blocks: number of decoder stages. Previously accepted but
                ignored (stages were hard-coded at 3); now honoured. The
                default of 3 reproduces the original structure exactly.
        """
        super().__init__()
        H, W = partioned_ip_res[0], partioned_ip_res[1]
        # Stage i works at pyramid level lv = num_blocks-1-i (deep -> shallow),
        # i.e. C * 2**lv channels at resolution (H / 2**lv, W / 2**lv).
        levels = [num_blocks - 1 - i for i in range(num_blocks)]
        # Swin Transformer feature stages: 4C, 2C, C channels (for the default).
        self.dec_swin_blocks = nn.ModuleList([
            SwinBlock(C * 2 ** lv, (H // 2 ** lv, W // 2 ** lv))
            for lv in levels
        ])
        # Upsampling before each stage: C*2**(lv+1) -> C*2**lv.
        self.dec_patch_expand_blocks = nn.ModuleList([
            PatchExpansion(C * 2 ** (lv + 1))
            for lv in levels
        ])
        # Concatenating the skip feature doubles the channels; project back:
        # 2 * (C*2**lv) -> C*2**lv.
        self.skip_conn_concat = nn.ModuleList([
            nn.Linear(C * 2 ** (lv + 1), C * 2 ** lv)
            for lv in levels
        ])

    def forward(self, x, encoder_features):
        """
        Args:
            x: bottleneck features
            encoder_features: encoder skip features ordered deep -> shallow
        Returns:
            decoded features at the patch resolution
        """
        for patch_expand, swin_block, enc_ftr, linear_concatter in zip(
                self.dec_patch_expand_blocks,
                self.dec_swin_blocks,
                encoder_features,
                self.skip_conn_concat):
            x = patch_expand(x)  # upsample
            x = torch.cat([x, enc_ftr], dim=-1)  # concatenate skip feature
            x = linear_concatter(x)  # fuse back to the stage width
            x = swin_block(x)  # feature processing
        return x


class SwinUNet(nn.Module):
    """Swin-UNet: a U-shaped segmentation network built from Swin Transformer blocks."""

    def __init__(self, H, W, ch, C, num_class, num_blocks=3, patch_size=4):
        """
        Args:
            H: input image height
            W: input image width
            ch: number of input image channels
            C: base feature dimension
            num_class: number of segmentation classes
            num_blocks: number of encoder/decoder stages
            patch_size: patch size for the initial embedding
        """
        super().__init__()
        # Patch embedding of the input image.
        self.patch_embed = PatchEmbedding(ch, C, patch_size)
        # Encoder.
        self.encoder = Encoder(C, (H // patch_size, W // patch_size), num_blocks)
        # Bottleneck stage at the deepest resolution.
        self.bottleneck = SwinBlock(C * (2 ** num_blocks),
                                    (H // (patch_size * (2 ** num_blocks)),
                                     W // (patch_size * (2 ** num_blocks))))
        # Decoder.
        self.decoder = Decoder(C, (H // patch_size, W // patch_size), num_blocks)
        # Final upsampling back to the input resolution; outputs C // 8 channels.
        self.final_expansion = FinalPatchExpansion(C)
        # Segmentation head. BUGFIX: the input width was hard-coded to 12,
        # which only matched FinalPatchExpansion's C // 8 output when C == 96.
        self.head = nn.Conv2d(C // 8, num_class, 1)
        # NOTE(review): relu and Bn are never used in forward(); kept only so
        # existing checkpoints (which include Bn's parameters) still load.
        self.relu = nn.ReLU()
        self.Bn = nn.BatchNorm2d(num_features=num_class)

    def forward(self, x):
        """Full pipeline.

        1. Patch-embed the image.
        2. Encode with downsampling, keeping skip features.
        3. Process the bottleneck.
        4. Decode with upsampling and skip fusion.
        5. Upsample to the original resolution.
        6. Project to class logits and squash with sigmoid.
        """
        x = self.patch_embed(x)  # patch embedding

        x, skip_ftrs = self.encoder(x)  # encoder

        x = self.bottleneck(x)  # bottleneck

        x = self.decoder(x, skip_ftrs[::-1])  # decoder (skips passed deep -> shallow)

        x = self.final_expansion(x)  # final upsampling

        x = self.head(x.permute(0, 3, 1, 2))  # (B, H, W, C) -> (B, num_class, H, W)

        return torch.sigmoid(x)  # sigmoid activation


if __name__ == '__main__':
    # Smoke test: push a random image through the model and report the output shape.
    model = SwinUNet(224, 224, 3, 96, 1, 3, 4)
    dummy = torch.randn(1, 3, 224, 224)
    out = model(dummy)
    print(out.shape)
