import torch
from torch import nn

from SwinEncoder import BasicLayer
from cross_attn import CBAMBlock


class SwinASPP(nn.Module):
    """ASPP-style multi-scale feature module built from Swin attention layers.

    Runs several ``BasicLayer`` branches in parallel, one per admissible
    window size (the Swin analogue of ASPP's multiple dilation rates),
    concatenates their outputs along the channel axis, and projects the
    result back to ``out_dim`` channels, optionally with a CBAM
    cross-attention projection instead of a plain linear layer.
    """

    def __init__(self, input_size, input_dim, out_dim, cross_attn,
                 depth, num_heads, mlp_ratio, qkv_bias, qk_scale,
                 drop_rate, attn_drop_rate, drop_path_rate,
                 norm_layer, aspp_norm, aspp_activation, start_window_size,
                 aspp_dropout, downsample, use_checkpoint):
        """
        Args:
            input_size: spatial side length of the (square) input feature map.
            input_dim: channel count of the input features.
            out_dim: channel count of the projected output.
            cross_attn: 'CBAM' to project with a CBAMBlock, anything else
                falls back to a plain nn.Linear projection.
            depth / num_heads / mlp_ratio / qkv_bias / qk_scale /
            drop_rate / attn_drop_rate / drop_path_rate / norm_layer /
            downsample / use_checkpoint: forwarded to each BasicLayer branch.
            aspp_norm: if truthy, apply ``norm_layer(out_dim)`` after projection.
            aspp_activation: 'relu', 'gelu', or None (no activation).
            start_window_size: smallest window size considered when deriving
                the set of window sizes from the divisors of ``input_size``.
            aspp_dropout: dropout probability applied after activation.

        Raises:
            ValueError: if no valid window size exists for ``input_size``,
                or if ``aspp_activation`` is not one of the supported values.
        """
        super().__init__()

        self.out_dim = out_dim
        # Window sizes must evenly tile the feature map. input_size == 24 is
        # special-cased with a hand-picked set; otherwise use every divisor
        # of input_size that is at least start_window_size.
        if input_size == 24:
            self.possible_window_sizes = [4, 6, 8, 12, 24]
        else:
            self.possible_window_sizes = [i for i in range(start_window_size, input_size + 1) if input_size % i == 0]

        # Fail fast: with no window sizes the concat/projection below would
        # silently be built with zero input channels.
        if not self.possible_window_sizes:
            raise ValueError(
                f'no valid window size for input_size={input_size} '
                f'with start_window_size={start_window_size}')

        # One parallel Swin branch per window size. The branch whose window
        # spans the whole map (global attention) is kept shallow (depth 1).
        self.layers = nn.ModuleList()
        for ws in self.possible_window_sizes:
            layer = BasicLayer(dim=int(input_dim),
                               input_resolution=(input_size, input_size),
                               depth=1 if ws == input_size else depth,
                               num_heads=num_heads,
                               window_size=ws,
                               mlp_ratio=mlp_ratio,
                               qkv_bias=qkv_bias, qk_scale=qk_scale,
                               drop=drop_rate, attn_drop=attn_drop_rate,
                               drop_path=drop_path_rate,
                               norm_layer=norm_layer,
                               downsample=downsample,
                               use_checkpoint=use_checkpoint)

            self.layers.append(layer)

        # Project the concatenated branch outputs back to out_dim channels.
        if cross_attn == 'CBAM':
            self.proj = CBAMBlock(input_dim=len(self.layers) * input_dim,
                                  reduction=12,
                                  input_size=input_size,
                                  out_dim=out_dim)
        else:
            self.proj = nn.Linear(len(self.layers) * input_dim, out_dim)

        # Optional post-projection normalization.
        self.norm = norm_layer(out_dim) if aspp_norm else None
        # Explicit else-branch: previously an unrecognized value left
        # self.activation unset, deferring failure to an opaque
        # AttributeError inside forward().
        if aspp_activation == 'relu':
            self.activation = nn.ReLU()
        elif aspp_activation == 'gelu':
            self.activation = nn.GELU()
        elif aspp_activation is None:
            self.activation = None
        else:
            raise ValueError(
                f"aspp_activation must be 'relu', 'gelu' or None, "
                f"got {aspp_activation!r}")

        self.dropout = nn.Dropout(aspp_dropout)

    def forward(self, x):
        """
        Args:
            x: high-level features of shape (batch, input_size, input_size, input_dim).

        Returns:
            Tensor of shape (batch, input_size, input_size, out_dim).
        """
        B, H, W, C = x.shape
        # Flatten spatial dims to the token axis expected by BasicLayer.
        x = x.view(B, H * W, C)

        features = []
        for layer in self.layers:
            # BasicLayer returns a pair; the second element is discarded here
            # (presumably the pre-downsample features — confirm in SwinEncoder).
            out, _ = layer(x)
            features.append(out)

        # Channel-wise concat of all branches, then project to out_dim.
        features = torch.cat(features, dim=-1)
        features = self.proj(features)

        if self.norm is not None:
            features = self.norm(features)
        if self.activation is not None:
            features = self.activation(features)
        features = self.dropout(features)

        # Restore the (B, H, W, C) layout with the projected channel count.
        return features.view(B, H, W, self.out_dim)


if __name__ == '__main__':
    # Smoke test: push a dummy high-level feature map through SwinASPP and
    # confirm the output keeps the spatial size while changing channels.
    config = dict(
        input_size=14,
        input_dim=384,
        out_dim=96,
        cross_attn='CBAM',
        depth=2,
        num_heads=3,
        mlp_ratio=4.0,        # MLP hidden dim is 4x the embedding dim
        qkv_bias=True,        # enable bias on the QKV projections
        qk_scale=None,        # let the model derive the QK scaling itself
        drop_rate=0.1,        # standard dropout rate
        attn_drop_rate=0.1,   # dropout rate inside attention
        drop_path_rate=0.1,   # stochastic-depth (drop path) rate
        norm_layer=nn.LayerNorm,
        aspp_norm=True,       # normalize the ASPP output
        aspp_activation='relu',  # ReLU here; GELU is another reasonable choice
        start_window_size=7,  # smallest window size to consider
        aspp_dropout=0.1,     # dropout applied after the ASPP projection
        downsample=None,      # no further downsampling inside the ASPP
        use_checkpoint=False  # skip gradient checkpointing
    )
    model = SwinASPP(**config)
    dummy_features = torch.randn(1, 14, 14, 384)
    output = model(dummy_features)
    print(output.shape)
    # torch.Size([1, 14, 14, 96])
