"""
SegNet 模型实现
使用池化索引进行上采样的编码器-解码器结构
"""
import torch
import torch.nn as nn


class SegNet(nn.Module):
    """SegNet encoder-decoder for dense prediction.

    A VGG-style encoder records max-pooling indices at each of its five
    stages; the decoder uses those indices with ``nn.MaxUnpool2d`` to
    upsample, then refines with convolutions.

    Input:  (B, in_channels, H, W)
    Output: (B, out_channels, H, W)  -- spatial size is preserved exactly,
    even when H or W is not divisible by 32, because each unpool call is
    given the pre-pool ``output_size`` (without it, odd intermediate sizes
    make the unpool indices exceed the default doubled output and crash).
    """

    def __init__(self, in_channels=1, out_channels=1):
        """
        Args:
            in_channels: number of channels in the input image.
            out_channels: number of channels in the output map.
        """
        super(SegNet, self).__init__()

        # Encoder (VGG-16 layout: 2, 2, 3, 3, 3 convs per stage).
        self.enc1 = self._conv_stage([in_channels, 64, 64])
        self.enc2 = self._conv_stage([64, 128, 128])
        self.enc3 = self._conv_stage([128, 256, 256, 256])
        self.enc4 = self._conv_stage([256, 512, 512, 512])
        self.enc5 = self._conv_stage([512, 512, 512, 512])

        # Decoder (mirrors the encoder; channel reduction on the last conv
        # of each stage).
        self.dec5 = self._conv_stage([512, 512, 512, 512])
        self.dec4 = self._conv_stage([512, 512, 512, 256])
        self.dec3 = self._conv_stage([256, 256, 256, 128])
        self.dec2 = self._conv_stage([128, 128, 64])
        # Final stage ends with a bare conv: raw logits, no BN/ReLU.
        self.dec1 = self._conv_stage([64, 64, out_channels],
                                     final_bn_relu=False)

        # Shared 2x2 pooling (indices kept for unpooling) and its inverse.
        self.pool = nn.MaxPool2d(2, 2, return_indices=True)
        self.unpool = nn.MaxUnpool2d(2, 2)

    @staticmethod
    def _conv_stage(channels, final_bn_relu=True):
        """Build a stage of 3x3 conv -> BN -> ReLU units.

        Args:
            channels: channel sizes, e.g. [64, 128, 128] builds two convs.
            final_bn_relu: when False, the last conv is emitted without
                BatchNorm/ReLU (used for the output layer).

        Returns:
            nn.Sequential with the same layer ordering (and hence the same
            state_dict keys) as writing the layers out by hand.
        """
        layers = []
        n_convs = len(channels) - 1
        for i in range(n_convs):
            layers.append(nn.Conv2d(channels[i], channels[i + 1], 3,
                                    padding=1))
            if final_bn_relu or i < n_convs - 1:
                layers.append(nn.BatchNorm2d(channels[i + 1]))
                layers.append(nn.ReLU(inplace=True))
        return nn.Sequential(*layers)

    def forward(self, x):
        # Encoder: keep each pre-pool feature map so its size can be
        # restored exactly during unpooling.
        x1 = self.enc1(x)
        x1_pooled, indices1 = self.pool(x1)

        x2 = self.enc2(x1_pooled)
        x2_pooled, indices2 = self.pool(x2)

        x3 = self.enc3(x2_pooled)
        x3_pooled, indices3 = self.pool(x3)

        x4 = self.enc4(x3_pooled)
        x4_pooled, indices4 = self.pool(x4)

        x5 = self.enc5(x4_pooled)
        x5_pooled, indices5 = self.pool(x5)

        # Decoder: output_size pins the unpooled map to the pre-pool size,
        # which is required whenever the pooled size was floored (odd H/W).
        x5_unpooled = self.unpool(x5_pooled, indices5, output_size=x5.size())
        x5_dec = self.dec5(x5_unpooled)

        x4_unpooled = self.unpool(x5_dec, indices4, output_size=x4.size())
        x4_dec = self.dec4(x4_unpooled)

        x3_unpooled = self.unpool(x4_dec, indices3, output_size=x3.size())
        x3_dec = self.dec3(x3_unpooled)

        x2_unpooled = self.unpool(x3_dec, indices2, output_size=x2.size())
        x2_dec = self.dec2(x2_unpooled)

        x1_unpooled = self.unpool(x2_dec, indices1, output_size=x1.size())
        output = self.dec1(x1_unpooled)

        return output


if __name__ == '__main__':
    # Quick smoke test: one forward pass on a random batch.
    net = SegNet(in_channels=1, out_channels=1)
    dummy = torch.randn(2, 1, 256, 256)
    result = net(dummy)
    print(f"输入形状: {dummy.shape}")
    print(f"输出形状: {result.shape}")
    n_params = sum(p.numel() for p in net.parameters())
    print(f"参数量: {n_params / 1e6:.2f}M")
