import torch
import torch.nn as nn

from nni.retiarii.nn.pytorch import LayerChoice
from nni.retiarii import model_wrapper
from unet import DepthwiseSeparableConv, AttentionBlock


# Shared keyword arguments for every 3x3 "same"-padding candidate convolution.
CONV_KWARGS = {"kernel_size": 3, "padding": 1}


class EncoderBlock(nn.Module):
    """One UNet encoder stage with NAS-searchable operators.

    Two conv -> BatchNorm -> activation stages followed by a searchable
    2x2 downsampling.  Each conv is a ``LayerChoice`` between a plain
    3x3 ``Conv2d`` and a ``DepthwiseSeparableConv``; the activation
    (ReLU vs. ELU) is a single ``LayerChoice`` module applied after both
    stages.

    Fix: removed ``@model_wrapper`` — NNI requires that decorator on the
    outermost model only (here ``UNet``); wrapping inner sub-modules as
    well is unsupported and can break model-space construction.

    NOTE(review): the labels ("conv1", "conv2", "pool", "active_fn") are
    identical for every ``EncoderBlock`` instance (and "conv1"/"conv2"/
    "active_fn" are reused by ``DecoderBlock`` too), so in NNI all those
    choices collapse into one shared architecture decision per label.
    Confirm this sharing is intended; otherwise make the labels unique
    per instance.

    Args:
        in_channels: number of input feature channels.
        out_channels: number of output feature channels.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv1 = LayerChoice(
            [
                nn.Conv2d(in_channels, out_channels, **CONV_KWARGS),
                DepthwiseSeparableConv(in_channels, out_channels),
            ],
            label="conv1",
        )
        self.norm1 = nn.BatchNorm2d(out_channels)
        self.conv2 = LayerChoice(
            [
                nn.Conv2d(out_channels, out_channels, **CONV_KWARGS),
                DepthwiseSeparableConv(out_channels, out_channels),
            ],
            label="conv2",
        )
        self.norm2 = nn.BatchNorm2d(out_channels)
        # Searchable downsampling: max vs. average pooling.
        self.pool = LayerChoice([nn.MaxPool2d(2), nn.AvgPool2d(2)], label="pool")
        self.activation = LayerChoice([nn.ReLU(), nn.ELU()], label="active_fn")

    def forward(self, x):
        """Return ``(features, pooled)``.

        ``features`` is the full-resolution output of the second conv
        stage (used later as the skip connection); ``pooled`` is the
        same tensor after 2x2 downsampling, fed to the next stage.
        """
        x = self.activation(self.norm1(self.conv1(x)))
        x = self.activation(self.norm2(self.conv2(x)))
        p = self.pool(x)
        return x, p


class DecoderBlock(nn.Module):
    """One UNet decoder stage with attention-gated skip connections.

    Upsamples via transposed convolution, gates the encoder skip tensor
    with an ``AttentionBlock``, concatenates the two, then applies two
    searchable conv -> BatchNorm -> activation stages (same LayerChoice
    pattern as ``EncoderBlock``).

    Fix: removed ``@model_wrapper`` — NNI requires that decorator on the
    outermost model only (here ``UNet``); wrapping inner sub-modules as
    well is unsupported and can break model-space construction.

    Args:
        in_channels: channels of the lower-resolution input tensor.
        skip_channels: channels of the encoder skip tensor.
        out_channels: channels produced by this stage.
    """

    def __init__(self, in_channels, skip_channels, out_channels):
        super().__init__()
        # 2x upsampling; output has out_channels (the skip tensor is
        # concatenated afterwards, so it is NOT folded in here).
        self.up = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2)
        # Presumably gates `skip` using the upsampled tensor as the
        # attention signal — TODO confirm against AttentionBlock in unet.py.
        self.attention = AttentionBlock(out_channels, skip_channels, skip_channels // 2)

        self.conv1 = LayerChoice(
            [
                nn.Conv2d(out_channels + skip_channels, out_channels, **CONV_KWARGS),
                DepthwiseSeparableConv(out_channels + skip_channels, out_channels),
            ],
            label="conv1",
        )
        self.norm1 = nn.BatchNorm2d(out_channels)
        self.conv2 = LayerChoice(
            [
                nn.Conv2d(out_channels, out_channels, **CONV_KWARGS),
                DepthwiseSeparableConv(out_channels, out_channels),
            ],
            label="conv2",
        )
        self.norm2 = nn.BatchNorm2d(out_channels)
        self.activation = LayerChoice([nn.ReLU(), nn.ELU()], label="active_fn")

    def forward(self, x, skip):
        """Upsample ``x``, fuse it with the gated ``skip``, and refine.

        Args:
            x: lower-resolution decoder tensor (``in_channels``).
            skip: encoder feature map at the target resolution
                (``skip_channels``).
        """
        x = self.up(x)
        skip = self.attention(x, skip)
        # Concatenate the upsampled features with the attention-gated skip
        # along the channel axis before the conv stages.
        x = torch.cat([x, skip], dim=1)
        x = self.activation(self.norm1(self.conv1(x)))
        x = self.activation(self.norm2(self.conv2(x)))
        return x


@model_wrapper
class UNet(nn.Module):
    """Searchable four-level UNet for dense prediction.

    Each encoder stage doubles the channel width starting from
    ``min_channels`` while halving spatial resolution; a searchable
    bottleneck conv doubles the width once more; the decoder mirrors the
    encoder using attention-gated skip connections. The final 1x1 conv
    maps to ``out_channels`` and the output is squashed with a sigmoid.

    Args:
        in_channels: channels of the input image (default 3).
        out_channels: channels of the prediction map (default 1).
        min_channels: width of the first encoder stage (default 64).
    """

    def __init__(self, in_channels=3, out_channels=1, min_channels=64):
        super().__init__()
        # Channel widths for the five depth levels: w, 2w, 4w, 8w, 16w.
        widths = [min_channels * (1 << level) for level in range(5)]

        self.enc1 = EncoderBlock(in_channels, widths[0])
        self.enc2 = EncoderBlock(widths[0], widths[1])
        self.enc3 = EncoderBlock(widths[1], widths[2])
        self.enc4 = EncoderBlock(widths[2], widths[3])

        # Bottleneck: searchable plain vs. depthwise-separable 3x3 conv.
        # NOTE(review): unlike the encoder/decoder stages, no norm or
        # activation follows the center conv — confirm that is intended.
        center_candidates = [
            nn.Conv2d(widths[3], widths[4], **CONV_KWARGS),
            DepthwiseSeparableConv(widths[3], widths[4]),
        ]
        self.center = LayerChoice(center_candidates, label="center")

        self.dec4 = DecoderBlock(widths[4], widths[3], widths[3])
        self.dec3 = DecoderBlock(widths[3], widths[2], widths[2])
        self.dec2 = DecoderBlock(widths[2], widths[1], widths[1])
        self.dec1 = DecoderBlock(widths[1], widths[0], widths[0])

        self.final_conv = nn.Conv2d(widths[0], out_channels, kernel_size=1)

    def forward(self, x):
        """Full encoder -> center -> decoder pass; returns sigmoid output."""
        skip1, down = self.enc1(x)
        skip2, down = self.enc2(down)
        skip3, down = self.enc3(down)
        skip4, down = self.enc4(down)

        up = self.center(down)

        up = self.dec4(up, skip4)
        up = self.dec3(up, skip3)
        up = self.dec2(up, skip2)
        up = self.dec1(up, skip1)

        return torch.sigmoid(self.final_conv(up))
