import torch
import torch.nn as nn


def conv_block(in_ch, out_ch, norm=True, leaky=True, dropout=False):
    """Build a two-convolution pre-activation stack as an ``nn.Sequential``.

    Layout: [BN(in_ch) if norm] -> act -> Conv3x3(in->out)
            [-> Dropout2d(0.3) if dropout] -> BN(out_ch) -> act -> Conv3x3(out->out)

    Args:
        in_ch: number of input channels.
        out_ch: number of output channels.
        norm: prepend a BatchNorm2d over the input channels.
        leaky: use LeakyReLU(0.01) instead of plain ReLU.
        dropout: insert Dropout2d(0.3) after the first convolution.
    """
    def make_act():
        # A fresh activation module for each position in the stack.
        return nn.LeakyReLU(0.01, inplace=True) if leaky else nn.ReLU(inplace=True)

    stack = []
    if norm:
        stack.append(nn.BatchNorm2d(in_ch))
    stack += [make_act(), nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1)]
    if dropout:
        stack.append(nn.Dropout2d(0.3))
    stack += [
        nn.BatchNorm2d(out_ch),
        make_act(),
        nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1),
    ]
    return nn.Sequential(*stack)


def up_block(in_ch, out_ch):
    """2x bilinear upsampling followed by a 1x1 channel-projection conv."""
    upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
    project = nn.Conv2d(in_ch, out_ch, kernel_size=1)
    return nn.Sequential(upsample, project)


class UNetBlock(nn.Module):
    """Thin ``nn.Module`` wrapper around ``conv_block``.

    Lets the conv stack be registered inside ``nn.ModuleList`` containers.
    """

    def __init__(self, in_ch, out_ch, norm=True):
        super().__init__()
        # All layer construction is delegated to conv_block.
        self.block = conv_block(in_ch, out_ch, norm=norm)

    def forward(self, x):
        out = self.block(x)
        return out


class UNetPlusPlusL6_Attack(nn.Module):
    """Seven-level encoder/decoder network with skip connections.

    NOTE(review): despite the "PlusPlus" in the name, the wiring below is a
    plain U-Net (one skip connection per level), not a UNet++ nested-dense
    topology — confirm which architecture was intended.

    When ``residual`` is True, the network output is added to the input
    image and clamped to [0, 1], i.e. the model predicts a perturbation of
    the image rather than the image itself.

    Input spatial dimensions must be divisible by 2**6 = 64 so that the six
    MaxPool2d(2) / Upsample(2x) round trips re-align with the stored skip
    features at each ``torch.cat``.
    """

    def __init__(self, in_ch=3, out_ch=3, residual=True, deep_supervision=False):
        super().__init__()
        self.residual = residual
        # NOTE(review): deep_supervision is stored but never read anywhere
        # in this class — dead configuration, or unfinished feature.
        self.deep_supervision = deep_supervision

        # Channel widths per level; the deepest levels are very wide
        # (up to 4096 channels), making this model extremely heavy.
        filters = [64, 128, 256, 512, 1024, 2048, 4096]

        # Down path: seven pre-activation conv blocks. norm=False only
        # skips the *leading* BatchNorm inside each block; each block still
        # normalizes after its first convolution.
        self.encoder = nn.ModuleList([
            UNetBlock(in_ch, filters[0], norm=False),
            UNetBlock(filters[0], filters[1], norm=False),
            UNetBlock(filters[1], filters[2], norm=False),
            UNetBlock(filters[2], filters[3], norm=False),
            UNetBlock(filters[3], filters[4], norm=False),
            UNetBlock(filters[4], filters[5], norm=False),
            UNetBlock(filters[5], filters[6], norm=False)
        ])

        self.pool = nn.MaxPool2d(2)
        # Up path, ordered bottleneck-first:
        # filters[6]->filters[5], filters[5]->filters[4], ..., filters[1]->filters[0].
        self.upconvs = nn.ModuleList([
            up_block(filters[i], filters[i - 1]) for i in range(6, 0, -1)
        ])

        # Each decoder block sees the upsampled features concatenated with
        # the matching encoder skip, hence the doubled input channels.
        self.decoder = nn.ModuleList([
            UNetBlock(filters[i] * 2, filters[i], norm=True) for i in range(5, -1, -1)
        ])

        # Output head: BN -> LeakyReLU -> 3x3 conv down to out_ch channels.
        self.final = nn.Sequential(
            nn.BatchNorm2d(filters[0]),
            nn.LeakyReLU(0.01, inplace=True),
            nn.Conv2d(filters[0], out_ch, kernel_size=3, padding=1)
        )

        self._initialize_weights()

    def forward(self, x):
        """Encode, then decode with skip connections.

        Returns ``clamp(image + out, 0, 1)`` when ``self.residual`` is True,
        otherwise the raw convolutional output ``out``.
        """
        image = x  # keep the original input for the residual connection
        enc_feats = [self.encoder[0](x)]
        for i in range(1, 7):
            x = self.pool(enc_feats[-1])  # halve H and W before each deeper level
            enc_feats.append(self.encoder[i](x))

        x = enc_feats[-1]  # bottleneck
        for i in range(6):
            x = self.upconvs[i](x)  # 2x upsample + 1x1 channel projection
            # Concatenate with the encoder feature of the matching resolution.
            x = torch.cat([x, enc_feats[5 - i]], dim=1)
            x = self.decoder[i](x)

        out = self.final(x)
        return torch.clamp(image + out, 0, 1) if self.residual else out


    def _initialize_weights(self):
        """Kaiming-normal init for conv weights, unit/zero init for BatchNorm.

        NOTE(review): the ConvTranspose2d branch is dead code here — this
        model creates no transposed convolutions (upsampling is bilinear).
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
