import torch
from torch import nn

from ASPP import SwinASPP
from Decoder import SwinDecoder
from SwinEncoder import SwinEncoder


class SwinDeepLab(nn.Module):
    """DeepLab-style segmentation model built from Swin-Transformer parts.

    Pipeline (see ``forward``):
        encoder -> (low-level features, high-level features)
        ASPP on the high-level features
        decoder fuses low-level features with the ASPP pyramid
        sigmoid squashes the logits to per-pixel scores in (0, 1).
    """

    def __init__(self, num_class=1):
        """
        Args:
            num_class: number of output channels/classes. Defaults to 1
                (binary segmentation; the sigmoid output is a per-pixel
                probability map).
        """
        super().__init__()
        self.num_class = num_class

        # Encoder with default settings; low_level_idx/high_level_idx pick
        # which encoder stages expose their feature maps.
        # NOTE(review): index semantics per the author's note below —
        # stage 0 (stride-equal-to-window conv) downsamples 4x, each +1
        # halves the resolution again. Confirm against SwinEncoder.
        self.encoder = SwinEncoder(high_level_idx=2, low_level_idx=0)

        # Decoder configuration. The commented values below were marked as
        # examples by the original author and may need tuning for a
        # different backbone configuration.
        self.decoder = SwinDecoder(
            low_level_idx=0,   # example value; set to match the encoder taps
            high_level_idx=2,  # example value; set to match the encoder taps
            input_size=14,
            input_dim=96,
            num_classes=self.num_class,
            depth=3,           # example value; adjust as needed
            last_layer_depth=6,
            num_heads=3,
            window_size=7,
            mlp_ratio=4.0,
            qkv_bias=True,
            qk_scale=None,
            drop_rate=0.0,
            attn_drop_rate=0.0,
            drop_path_rate=0.1,
            norm_layer=nn.LayerNorm,
            decoder_norm=True,
            use_checkpoint=False,
        )

        self.aspp = SwinASPP(
            input_size=14,
            input_dim=384,
            out_dim=96,
            cross_attn='CBAM',
            depth=2,
            num_heads=3,
            mlp_ratio=4.0,            # MLP hidden dim = 4x input dim
            qkv_bias=True,            # enable bias for the QKV projection
            qk_scale=None,            # let the model use the default QK scale
            drop_rate=0.1,            # common dropout rate
            attn_drop_rate=0.1,       # dropout on attention weights
            drop_path_rate=0.1,       # stochastic depth rate
            norm_layer=nn.LayerNorm,
            aspp_norm=True,           # normalize the ASPP output
            aspp_activation='relu',   # ReLU activation (GELU also viable)
            start_window_size=7,
            aspp_dropout=0.1,         # dropout inside the ASPP module
            downsample=None,          # no further downsampling inside ASPP
            use_checkpoint=False,     # no gradient checkpointing
        )

    def run_encoder(self, x):
        """Run the encoder, returning (low_level, high_level) features."""
        low_level, high_level = self.encoder(x)
        return low_level, high_level

    def run_aspp(self, x):
        """Apply the ASPP module to the high-level features."""
        return self.aspp(x)

    def run_decoder(self, low_level, pyramid):
        """Fuse low-level features with the ASPP pyramid output."""
        return self.decoder(low_level, pyramid)

    def forward(self, x):
        low_level, high_level = self.run_encoder(x)
        x = self.run_aspp(high_level)
        x = self.run_decoder(low_level, x)
        # Functional sigmoid: avoids allocating a new nn.Sigmoid module on
        # every forward pass; numerically identical to nn.Sigmoid()(x).
        return torch.sigmoid(x)


if __name__ == '__main__':
    # Smoke test: one 224x224 RGB image through the full model.
    # Encoder stage indexing (author's note): stage 0 (a conv whose stride
    # equals its window size) downsamples 4x; each +1 halves resolution again.
    # Renamed from `input`/`output` to avoid shadowing the `input` builtin.
    x = torch.randn(1, 3, 224, 224)
    model = SwinDeepLab(1)
    y = model(x)
    print(y.shape)
