from collections import OrderedDict

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.hub import load_state_dict_from_url
from torchvision.models import resnet34, resnet18
from torchvision.models.resnet import BasicBlock, ResNet
from torchvision.models.segmentation.deeplabv3 import ASPPConv, ASPPPooling


def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):
    """Build a bias-free Conv2d followed by BatchNorm2d.

    The submodules are registered under the names ``conv`` and ``bn`` so
    later re-parameterization code can reach them by attribute.
    """
    conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding,
                     groups=groups, bias=False)
    bn = nn.BatchNorm2d(num_features=out_channels)
    return nn.Sequential(OrderedDict([('conv', conv), ('bn', bn)]))


class RepVGGBlock(nn.Module):
    """Structural re-parameterization block in the RepVGG style.

    At training time the block runs three parallel branches — a 3x3
    conv+BN, a 1x1 conv+BN, and (when shape-preserving) a bare BN
    identity branch — summed before ReLU. ``switch_to_deploy`` fuses the
    three branches algebraically into a single biased 3x3 convolution
    (``rbr_reparam``) for inference.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3,
                 stride=1, padding=1, dilation=1, groups=1, deploy=False):
        super(RepVGGBlock, self).__init__()
        self.deploy = deploy
        self.groups = groups
        self.in_channels = in_channels

        # The fusion math below assumes a 3x3 kernel with "same" padding.
        assert kernel_size == 3
        assert padding == 1
        # Padding for the parallel 1x1 branch so its output aligns spatially
        # with the 3x3 branch (evaluates to 0 here).
        padding_11 = padding - kernel_size // 2

        self.relu = nn.ReLU(True)
        self.se = nn.Identity()  # placeholder hook for an optional SE module

        if deploy:
            # Single fused conv. NOTE(review): `dilation` is only honored in
            # this deploy path — the training-time branches below ignore it.
            self.rbr_reparam = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation,
                                         groups=groups, bias=True, padding_mode='zeros')
        else:
            # Identity branch exists only when input/output shapes match.
            self.rbr_identity = nn.BatchNorm2d(in_channels) if out_channels == in_channels and stride == 1 else None
            self.rbr_dense = conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=groups)
            self.rbr_1x1 = conv_bn(in_channels, out_channels, 1, stride, padding_11, groups=groups)

    def forward(self, inputs):
        # Deploy path: one fused conv. Training path: sum of three branches.
        if hasattr(self, 'rbr_reparam'):
            return self.relu(self.se(self.rbr_reparam(inputs)))
        id_out = 0 if self.rbr_identity is None else self.rbr_identity(inputs)
        return self.relu(self.se(self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out))

    def get_equivalent_kernel_bias(self):
        """Fuse the three branches into a single (3x3 kernel, bias) pair."""
        kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense)
        kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1)
        kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity)
        return kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid

    def _pad_1x1_to_3x3_tensor(self, kernel1x1):
        # Zero-pad a 1x1 kernel to 3x3 so it can be summed with the 3x3 kernel.
        return 0 if kernel1x1 is None else torch.nn.functional.pad(kernel1x1, [1, 1, 1, 1])

    def _fuse_bn_tensor(self, branch):
        """Collapse a conv+BN branch (or bare-BN identity) into (kernel, bias).

        Returns (0, 0) for an absent branch so the sums in
        get_equivalent_kernel_bias stay valid.
        """
        if branch is None:
            return 0, 0
        if isinstance(branch, nn.Sequential):
            kernel = branch.conv.weight
            running_mean = branch.bn.running_mean
            running_var = branch.bn.running_var
            gamma = branch.bn.weight
            beta = branch.bn.bias
            eps = branch.bn.eps
        else:
            # Bare-BN identity branch: synthesize an identity 3x3 kernel
            # (a 1 at the center of each channel's own slot), then fuse BN.
            assert isinstance(branch, nn.BatchNorm2d)
            if not hasattr(self, 'id_tensor'):
                input_dim = self.in_channels // self.groups
                kernel_value = np.zeros((self.in_channels, input_dim, 3, 3), dtype=np.float32)
                for i in range(self.in_channels):
                    kernel_value[i, i % input_dim, 1, 1] = 1
                self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device)
            kernel = self.id_tensor
            running_mean = branch.running_mean
            running_var = branch.running_var
            gamma = branch.weight
            beta = branch.bias
            eps = branch.eps
        # Standard conv+BN fusion: scale kernel by gamma/std, shift bias.
        std = (running_var + eps).sqrt()
        t = (gamma / std).reshape(-1, 1, 1, 1)
        return kernel * t, beta - running_mean * gamma / std

    def switch_to_deploy(self):
        """Replace the training branches with one fused conv, in place.

        Idempotent: returns immediately if already deployed. After this
        call the training-time parameters are gone, so the block can no
        longer be trained in multi-branch form.
        """
        if hasattr(self, 'rbr_reparam'):
            return
        kernel, bias = self.get_equivalent_kernel_bias()
        self.rbr_reparam = nn.Conv2d(
            in_channels=self.rbr_dense.conv.in_channels,
            out_channels=self.rbr_dense.conv.out_channels,
            kernel_size=self.rbr_dense.conv.kernel_size,
            stride=self.rbr_dense.conv.stride,
            padding=self.rbr_dense.conv.padding,
            dilation=self.rbr_dense.conv.dilation,
            groups=self.rbr_dense.conv.groups,
            bias=True)
        self.rbr_reparam.weight.data = kernel
        self.rbr_reparam.bias.data = bias
        # Detach everything so the fused block holds no autograd history.
        for para in self.parameters():
            para.detach_()
        self.__delattr__('rbr_dense')
        self.__delattr__('rbr_1x1')
        if hasattr(self, 'rbr_identity'):
            self.__delattr__('rbr_identity')
        if hasattr(self, 'id_tensor'):
            self.__delattr__('id_tensor')
        self.deploy = True


class RepConvBNReLU(nn.Module):
    """A RepVGGBlock followed by an extra BatchNorm2d and inplace ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, dilation=1, deploy=False):
        super(RepConvBNReLU, self).__init__()
        block = RepVGGBlock(in_channels, out_channels,
                            kernel_size=kernel_size, stride=stride,
                            padding=padding, dilation=dilation, deploy=deploy)
        self.conv = nn.Sequential(block, nn.BatchNorm2d(out_channels), nn.ReLU(True))

    def forward(self, x):
        return self.conv(x)


def base_upsample(x, size):
    """Bilinearly resize ``x`` to spatial ``size`` with corner alignment."""
    return F.interpolate(x, size=size, align_corners=True, mode='bilinear')


class FuckDecoder6(nn.Module):
    """Decoder stage that fuses upsampled decoder features with encoder
    features, exchanges information with a fine/edge stream, and emits a
    deep-supervision prediction map."""

    def __init__(self, in_channels, fine_channels, num_classes):
        super(FuckDecoder6, self).__init__()
        c, f = in_channels, fine_channels
        self.de_en_conv = nn.Sequential(
            RepConvBNReLU(c * 3, c * 3, 3, 1, 1),
            RepConvBNReLU(c * 3, c, 3, 1, 1),
        )
        self.down = RepConvBNReLU(c, f, 3, 1, 1)
        self.fine_conv = nn.Sequential(
            RepConvBNReLU(f, f, 3, 1, 1),
            RepConvBNReLU(f, f, 3, 1, 1),
        )
        self.out = nn.Sequential(
            RepConvBNReLU(c + f, c, 3, 1, 1),
            RepConvBNReLU(c, c, 3, 1, 1),
        )
        self.deep_sup = nn.Sequential(
            RepConvBNReLU(c, c, 3, 1, 1),
            nn.Conv2d(c, num_classes, 1),
        )

    def forward(self, de, en, fine):
        """Return (fused features, refined fine stream, deep-sup logits)."""
        de = base_upsample(de, en.size()[2:])
        fused = self.de_en_conv(torch.cat([de, en], dim=1))
        # Gate the fine stream with a projection of the fused features.
        gated = fine * base_upsample(self.down(fused), fine.size()[2:])
        out = self.out(torch.cat([fused, base_upsample(gated, fused.size()[2:])], dim=1))
        edge = self.fine_conv(gated + fine)
        return out, edge, self.deep_sup(out)


class FuckLoss6(nn.Module):
    """Composite binary-segmentation loss.

    Sum of: BCE-with-logits on the main output, an active-contour region
    term, an edge-weighted BCE, and optional deep-supervision BCE terms.

    Args:
        w: weight of each deep-supervision term.
        w_region: weight of the active-contour term (was hard-coded 0.4).
        w_edge: weight of the edge-weighted BCE term (was hard-coded 0.4).
    """

    def __init__(self, w=0.4, w_region=0.4, w_edge=0.4):
        super(FuckLoss6, self).__init__()
        # NOTE(review): despite the name, this is plain BCE-with-logits,
        # not a Dice loss; name kept for checkpoint/caller compatibility.
        self.dice = nn.BCEWithLogitsLoss()
        self.bce = nn.BCEWithLogitsLoss(reduction='none')
        self.w = w
        self.w_region = w_region
        self.w_edge = w_edge

    def edge_loss(self, edge, pred, target):
        """Pixel-wise BCE re-weighted by the edge map: x4 on edge pixels,
        x1/4 elsewhere (mutates only local resized copies)."""
        edge = F.interpolate(edge, pred.size()[2:], mode='nearest')
        target = F.interpolate(target, pred.size()[2:], mode='nearest')
        edge[edge != 0] = 4
        edge[edge == 0] = 1.0 / 4
        return (self.bce(pred, target) * edge).mean()

    def active_contour_loss(self, y_true, y_pred):
        """Region term of a Chan-Vese style active-contour loss on sigmoid(pred)."""
        y_pred = torch.sigmoid(y_pred)
        c_in = torch.ones_like(y_pred)
        c_out = torch.zeros_like(y_pred)
        region_in = torch.mean(y_pred * (y_true - c_in) ** 2)
        region_out = torch.mean((1 - y_pred) * (y_true - c_out) ** 2)
        return region_in + region_out

    def forward(self, x, y):
        """x: [main_logits, edge_logits, ...]; y: [mask, edge_map, ...]."""
        loss = self.dice(x[0], y[0])
        loss += self.active_contour_loss(y[0], x[0]) * self.w_region
        loss += self.edge_loss(y[1], x[1], y[0]) * self.w_edge

        # NOTE(review): loop bound is len(y) but it indexes x[i] —
        # presumably len(x) == len(y) when deep supervision is used; verify.
        if len(y) > 2:
            for i in range(2, len(y)):
                sup_gt = F.interpolate(y[0], x[i].size()[2:], mode='nearest')
                loss += self.dice(x[i], sup_gt) * self.w

        return loss


class ResNet34WithoutMaxPool(nn.Module):
    """ResNet-34 feature extractor with the stem max-pool removed.

    forward returns the four stage feature maps [x1, x2, x3, x4] with
    channel counts given by ``in_channels``.
    """

    def __init__(self, pretrained=False):
        super(ResNet34WithoutMaxPool, self).__init__()
        self.in_channels = [64, 128, 256, 512]
        res = resnet34(pretrained=pretrained)
        self.layer0 = nn.Sequential(res.conv1, res.bn1, res.relu)
        for name in ('layer1', 'layer2', 'layer3', 'layer4'):
            setattr(self, name, getattr(res, name))
        del res

    def forward(self, x):
        feats = []
        x = self.layer0(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
            feats.append(x)
        return feats


class ResNet18WithoutMaxPool(nn.Module):
    """ResNet-18 feature extractor with the stem max-pool removed.

    forward returns the four stage feature maps [x1, x2, x3, x4] with
    channel counts given by ``in_channels``.
    """

    def __init__(self, pretrained=False):
        super(ResNet18WithoutMaxPool, self).__init__()
        self.in_channels = [64, 128, 256, 512]
        res = resnet18(pretrained=pretrained)
        self.layer0 = nn.Sequential(res.conv1, res.bn1, res.relu)
        for name in ('layer1', 'layer2', 'layer3', 'layer4'):
            setattr(self, name, getattr(res, name))
        del res

    def forward(self, x):
        feats = []
        x = self.layer0(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
            feats.append(x)
        return feats


class ResNet10WithoutMaxPool(nn.Module):
    """Shallow ResNet ([1, 1, 1, 1] BasicBlocks) with the stem max-pool removed.

    When ``pretrained`` is truthy, ResNet-18 weights are loaded with
    strict=False, so only the layers present in this shallower net get
    initialized from the checkpoint.
    """

    def __init__(self, pretrained=False):
        super(ResNet10WithoutMaxPool, self).__init__()
        self.in_channels = [64, 128, 256, 512]
        res = ResNet(BasicBlock, [1, 1, 1, 1])
        if pretrained:
            state_dict = load_state_dict_from_url(
                "https://download.pytorch.org/models/resnet18-5c106cde.pth",
                progress=True)
            res.load_state_dict(state_dict, strict=False)
        self.layer0 = nn.Sequential(res.conv1, res.bn1, res.relu)
        for name in ('layer1', 'layer2', 'layer3', 'layer4'):
            setattr(self, name, getattr(res, name))
        del res

    def forward(self, x):
        feats = []
        x = self.layer0(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
            feats.append(x)
        return feats


class ConvBNReLU(nn.Module):
    """Conv2d (bias-free by default) -> BatchNorm2d -> inplace ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, bias=False):
        super(ConvBNReLU, self).__init__()
        layers = [
            nn.Conv2d(in_channels, out_channels,
                      kernel_size=kernel_size, stride=stride,
                      padding=padding, dilation=dilation, bias=bias),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(True),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)


class ASPPWithDIYDilate(nn.Module):
    """ASPP head with caller-chosen atrous rates.

    Five parallel branches — a 1x1 conv, three dilated 3x3 convs, and a
    global-pooling branch — are concatenated and projected back down to
    ``out_channels`` with dropout.
    """

    def __init__(self, in_channels, out_channels, atrous_rates):
        super(ASPPWithDIYDilate, self).__init__()
        rate1, rate2, rate3 = tuple(atrous_rates)
        self.convs = nn.ModuleList([
            ConvBNReLU(in_channels, out_channels, 1),
            ASPPConv(in_channels, out_channels, rate1),
            ASPPConv(in_channels, out_channels, rate2),
            ASPPConv(in_channels, out_channels, rate3),
            ASPPPooling(in_channels, out_channels),
        ])
        self.project = nn.Sequential(
            nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(True),
            nn.Dropout(0.3))

    def forward(self, x):
        branch_outputs = [conv(x) for conv in self.convs]
        return self.project(torch.cat(branch_outputs, dim=1))


class Fuck7Two(nn.Module):
    """Segmentation net: ResNet-34 encoder, ASPP middle, edge-guided decoders.

    forward returns [main_logits, dsn_logits, middle_features], where
    ``dsn`` is a coarse auxiliary prediction from the ASPP output and
    ``middle`` is the raw ASPP feature map (in_channels[3] channels, not
    class logits).
    """

    def __init__(self, num_classes=2, pretrained=None):
        super(Fuck7Two, self).__init__()
        self.backbone = ResNet34WithoutMaxPool(pretrained=pretrained)
        in_channels = self.backbone.in_channels
        edge_channels = in_channels[0]
        self.middle = ASPPWithDIYDilate(in_channels[3], in_channels[3], [5, 11, 17])
        # Auxiliary (deep-supervision) head on the ASPP output.
        self.dsn = nn.Sequential(
            RepConvBNReLU(in_channels[3], in_channels[2], 3, 1, 1),
            nn.Dropout2d(0.1),
            nn.Conv2d(in_channels[2], num_classes, 3, 1, 1)
        )
        self.edge = nn.Sequential(
            RepConvBNReLU(in_channels[0], edge_channels, 3, 1, 1),
            RepConvBNReLU(edge_channels, edge_channels, 3, 1, 1)
        )
        # NOTE(review): decoders are built with 1 output class regardless of
        # num_classes; their deep-supervision outputs are discarded in forward.
        self.up3 = FuckDecoder6(in_channels[2], edge_channels, 1)
        self.up2 = FuckDecoder6(in_channels[1], edge_channels, 1)
        self.up1 = FuckDecoder6(in_channels[0], edge_channels, 1)

        # NOTE(review): fine_out is registered but never used in forward —
        # dead parameters; confirm whether an edge output was intended here.
        self.fine_out = nn.Conv2d(edge_channels, 1, 3, 1, 1)

        self.out = nn.Sequential(
            RepConvBNReLU(in_channels[0] + edge_channels, in_channels[0], 3, 1, 1),
            RepConvBNReLU(in_channels[0], in_channels[0], 3, 1, 1),
            nn.Conv2d(in_channels[0], num_classes, 3, 1, 1)
        )

    def forward(self, xx):
        x1, x2, x3, x4 = self.backbone(xx)
        edge = self.edge(x1)  # edge stream from the highest-resolution stage
        out = self.middle(x4)
        middle = out  # raw ASPP features, returned for auxiliary use
        dsn = self.dsn(out)
        out, edge, _ = self.up3(out, x3, edge)
        out, edge, _ = self.up2(out, x2, edge)
        out, edge, _ = self.up1(out, x1, edge)
        out = self.out(torch.cat([out, edge], dim=1))
        out = F.interpolate(out, xx.size()[2:], mode='bilinear', align_corners=False)
        return [out, dsn, middle]


class SimpleEncoder(nn.Module):
    """Four-stage RepVGG-style encoder (stride-2 stem, three stride-2 stages).

    Args:
        deploy: build every RepVGGBlock in its fused single-conv form.

    forward returns the stage feature maps [x1, x2, x3, x4].
    """

    def __init__(self, deploy=False):
        super(SimpleEncoder, self).__init__()
        self.in_channels = [64, 128, 256, 512]
        # Bug fix: the stem previously ignored `deploy`, so an encoder built
        # with deploy=True still contained one un-fused training-mode block.
        self.layer0 = RepConvBNReLU(3, self.in_channels[0], 3, 2, 1, deploy=deploy)
        self.layer1 = nn.Sequential(
            RepConvBNReLU(self.in_channels[0], self.in_channels[0], 3, 1, 1, deploy=deploy),
            RepConvBNReLU(self.in_channels[0], self.in_channels[0], 3, 1, 1, deploy=deploy),
        )
        self.layer2 = nn.Sequential(
            RepConvBNReLU(self.in_channels[0], self.in_channels[1], 3, 2, 1, deploy=deploy),
            RepConvBNReLU(self.in_channels[1], self.in_channels[1], 3, 1, 1, deploy=deploy),
        )
        self.layer3 = nn.Sequential(
            RepConvBNReLU(self.in_channels[1], self.in_channels[2], 3, 2, 1, deploy=deploy),
            RepConvBNReLU(self.in_channels[2], self.in_channels[2], 3, 1, 1, deploy=deploy),
        )
        self.layer4 = nn.Sequential(
            RepConvBNReLU(self.in_channels[2], self.in_channels[3], 3, 2, 1, deploy=deploy),
            RepConvBNReLU(self.in_channels[3], self.in_channels[3], 3, 1, 1, deploy=deploy),
        )

    def forward(self, x):
        x = self.layer0(x)
        x1 = self.layer1(x)
        x2 = self.layer2(x1)
        x3 = self.layer3(x2)
        x4 = self.layer4(x3)
        return [x1, x2, x3, x4]


class FuckSmallDecoder(nn.Module):
    """FuckDecoder6 variant without the deep-supervision head; all blocks
    honor the ``deploy`` flag for fused inference."""

    def __init__(self, in_channels, fine_channels, deploy):
        super(FuckSmallDecoder, self).__init__()
        c, f = in_channels, fine_channels
        self.de_en_conv = nn.Sequential(
            RepConvBNReLU(c * 3, c * 3, 3, 1, 1, deploy=deploy),
            RepConvBNReLU(c * 3, c, 3, 1, 1, deploy=deploy),
        )
        self.down = RepConvBNReLU(c, f, 3, 1, 1, deploy=deploy)
        self.fine_conv = nn.Sequential(
            RepConvBNReLU(f, f, 3, 1, 1, deploy=deploy),
            RepConvBNReLU(f, f, 3, 1, 1, deploy=deploy),
        )
        self.out = nn.Sequential(
            RepConvBNReLU(c + f, c, 3, 1, 1, deploy=deploy),
            RepConvBNReLU(c, c, 3, 1, 1, deploy=deploy),
        )

    def forward(self, de, en, fine):
        """Return (fused decoder features, refined fine stream)."""
        de = base_upsample(de, en.size()[2:])
        fused = self.de_en_conv(torch.cat([de, en], dim=1))
        gated = fine * base_upsample(self.down(fused), fine.size()[2:])
        out = self.out(torch.cat([fused, base_upsample(gated, fused.size()[2:])], dim=1))
        return out, self.fine_conv(gated + fine)


class FuckSmall(nn.Module):
    """Lightweight RepVGG segmentation net with an edge stream.

    Args:
        num_classes: output channels of both the main and edge heads.
        deploy: build RepVGG blocks in their fused (inference) form.

    forward returns [main_logits, edge_logits, middle_features]; ``middle``
    is the raw ASPP feature map, not logits.
    """

    def __init__(self, num_classes=1, deploy=False):
        super(FuckSmall, self).__init__()
        self.backbone = SimpleEncoder(deploy=deploy)
        in_channels = self.backbone.in_channels
        edge_channels = in_channels[0]
        self.middle = ASPPWithDIYDilate(in_channels[3], in_channels[3], [5, 11, 17])
        # Bug fix: `deploy` was previously dropped for the edge and out heads,
        # leaving them un-fused while the rest of the net was built deployed.
        self.edge = RepConvBNReLU(in_channels[0], edge_channels, 3, 1, 1, deploy=deploy)
        self.up3 = FuckSmallDecoder(in_channels[2], edge_channels, deploy=deploy)
        self.up2 = FuckSmallDecoder(in_channels[1], edge_channels, deploy=deploy)
        self.up1 = FuckSmallDecoder(in_channels[0], edge_channels, deploy=deploy)

        self.out = nn.Sequential(
            RepConvBNReLU(in_channels[0] + edge_channels, in_channels[0], 3, 1, 1, deploy=deploy),
            RepConvBNReLU(in_channels[0], in_channels[0], 3, 1, 1, deploy=deploy),
            nn.Conv2d(in_channels[0], num_classes, 3, 1, 1)
        )
        self.fine_out = nn.Conv2d(edge_channels, num_classes, 3, 1, 1)

    def forward(self, xx):
        """Encoder -> ASPP -> three decoder stages -> two heads."""
        x1, x2, x3, x4 = self.backbone(xx)
        edge = self.edge(x1)
        out = self.middle(x4)
        middle = out
        out, edge = self.up3(out, x3, edge)
        out, edge = self.up2(out, x2, edge)
        out, edge = self.up1(out, x1, edge)
        out = self.out(torch.cat([out, edge], dim=1))
        out = F.interpolate(out, xx.size()[2:], mode='bilinear', align_corners=False)
        edge = self.fine_out(edge)
        return [out, edge, middle]


class Fuck7(nn.Module):
    """ResNet-34 encoder + ASPP + three edge-guided RepVGG decoder stages.

    forward returns [main_logits, edge_logits, middle_features]; ``middle``
    is the raw ASPP feature map, not logits.
    """

    def __init__(self, num_classes, pretrained=None):
        super(Fuck7, self).__init__()
        self.backbone = ResNet34WithoutMaxPool(pretrained=pretrained)
        chans = self.backbone.in_channels
        edge_channels = chans[0]
        self.middle = ASPPWithDIYDilate(chans[3], chans[3], [5, 11, 17])
        self.edge = nn.Sequential(
            RepConvBNReLU(chans[0], edge_channels, 3, 1, 1),
            RepConvBNReLU(edge_channels, edge_channels, 3, 1, 1)
        )
        self.up3 = FuckDecoder6(chans[2], edge_channels, num_classes)
        self.up2 = FuckDecoder6(chans[1], edge_channels, num_classes)
        self.up1 = FuckDecoder6(chans[0], edge_channels, num_classes)
        self.out = nn.Sequential(
            RepConvBNReLU(chans[0] + edge_channels, chans[0], 3, 1, 1),
            RepConvBNReLU(chans[0], chans[0], 3, 1, 1),
            nn.Conv2d(chans[0], num_classes, 3, 1, 1)
        )
        self.fine_out = nn.Conv2d(edge_channels, num_classes, 3, 1, 1)

    def forward(self, xx):
        x1, x2, x3, x4 = self.backbone(xx)
        edge = self.edge(x1)
        feat = self.middle(x4)
        middle = feat
        # Decoder stages, finest skip last; deep-sup outputs are unused here.
        for stage, skip in ((self.up3, x3), (self.up2, x2), (self.up1, x1)):
            feat, edge, _ = stage(feat, skip, edge)
        seg = self.out(torch.cat([feat, edge], dim=1))
        seg = F.interpolate(seg, xx.size()[2:], mode='bilinear', align_corners=False)
        return [seg, self.fine_out(edge), middle]


class Fuck7Res18(nn.Module):
    """Fuck7 with a ResNet-18 backbone instead of ResNet-34.

    forward returns [main_logits, edge_logits, middle_features]; ``middle``
    is the raw ASPP feature map, not logits.
    """

    def __init__(self, num_classes, pretrained=None):
        super(Fuck7Res18, self).__init__()
        self.backbone = ResNet18WithoutMaxPool(pretrained=pretrained)
        chans = self.backbone.in_channels
        edge_channels = chans[0]
        self.middle = ASPPWithDIYDilate(chans[3], chans[3], [5, 11, 17])
        self.edge = nn.Sequential(
            RepConvBNReLU(chans[0], edge_channels, 3, 1, 1),
            RepConvBNReLU(edge_channels, edge_channels, 3, 1, 1)
        )
        self.up3 = FuckDecoder6(chans[2], edge_channels, num_classes)
        self.up2 = FuckDecoder6(chans[1], edge_channels, num_classes)
        self.up1 = FuckDecoder6(chans[0], edge_channels, num_classes)
        self.out = nn.Sequential(
            RepConvBNReLU(chans[0] + edge_channels, chans[0], 3, 1, 1),
            RepConvBNReLU(chans[0], chans[0], 3, 1, 1),
            nn.Conv2d(chans[0], num_classes, 3, 1, 1)
        )
        self.fine_out = nn.Conv2d(edge_channels, num_classes, 3, 1, 1)

    def forward(self, xx):
        x1, x2, x3, x4 = self.backbone(xx)
        edge = self.edge(x1)
        feat = self.middle(x4)
        middle = feat
        # Decoder stages, finest skip last; deep-sup outputs are unused here.
        for stage, skip in ((self.up3, x3), (self.up2, x2), (self.up1, x1)):
            feat, edge, _ = stage(feat, skip, edge)
        seg = self.out(torch.cat([feat, edge], dim=1))
        seg = F.interpolate(seg, xx.size()[2:], mode='bilinear', align_corners=False)
        return [seg, self.fine_out(edge), middle]


class FuckDecoder8(nn.Module):
    """Plain-conv counterpart of FuckDecoder6: fuses decoder and encoder
    features, gates the fine/edge stream, and emits a deep-supervision map."""

    def __init__(self, in_channels, fine_channels, num_classes):
        super(FuckDecoder8, self).__init__()
        c, f = in_channels, fine_channels
        self.de_en_conv = nn.Sequential(
            ConvBNReLU(c * 3, c * 3, 3, 1, 1),
            ConvBNReLU(c * 3, c, 3, 1, 1),
        )
        self.down = ConvBNReLU(c, f, 3, 1, 1)
        self.fine_conv = nn.Sequential(
            ConvBNReLU(f, f, 3, 1, 1),
            ConvBNReLU(f, f, 3, 1, 1),
        )
        self.out = nn.Sequential(
            ConvBNReLU(c + f, c, 3, 1, 1),
            ConvBNReLU(c, c, 3, 1, 1),
        )
        self.deep_sup = nn.Sequential(
            ConvBNReLU(c, c, 3, 1, 1),
            nn.Conv2d(c, num_classes, 1),
        )

    def forward(self, de, en, fine):
        """Return (fused features, refined fine stream, deep-sup logits)."""
        de = base_upsample(de, en.size()[2:])
        fused = self.de_en_conv(torch.cat([de, en], dim=1))
        gated = fine * base_upsample(self.down(fused), fine.size()[2:])
        out = self.out(torch.cat([fused, base_upsample(gated, fused.size()[2:])], dim=1))
        return out, self.fine_conv(gated + fine), self.deep_sup(out)


class Fuck8(nn.Module):
    """Fuck7 variant built from plain ConvBNReLU blocks (no RepVGG).

    forward returns [main_logits, edge_logits, middle_features]; the
    per-stage deep-supervision maps are computed but discarded here.
    """

    def __init__(self, num_classes, pretrained=None):
        super(Fuck8, self).__init__()
        self.backbone = ResNet34WithoutMaxPool(pretrained=pretrained)
        chans = self.backbone.in_channels
        edge_channels = chans[0]
        self.middle = ASPPWithDIYDilate(chans[3], chans[3], [5, 11, 17])
        self.edge = nn.Sequential(
            ConvBNReLU(chans[0], edge_channels, 3, 1, 1),
            ConvBNReLU(edge_channels, edge_channels, 3, 1, 1)
        )
        self.up3 = FuckDecoder8(chans[2], edge_channels, num_classes)
        self.up2 = FuckDecoder8(chans[1], edge_channels, num_classes)
        self.up1 = FuckDecoder8(chans[0], edge_channels, num_classes)
        self.out = nn.Sequential(
            ConvBNReLU(chans[0] + edge_channels, chans[0], 3, 1, 1),
            ConvBNReLU(chans[0], chans[0], 3, 1, 1),
            nn.Conv2d(chans[0], num_classes, 3, 1, 1)
        )
        self.fine_out = nn.Conv2d(edge_channels, num_classes, 3, 1, 1)

    def forward(self, xx):
        x1, x2, x3, x4 = self.backbone(xx)
        edge = self.edge(x1)
        feat = self.middle(x4)
        middle = feat
        # Deep-supervision maps from each stage are intentionally discarded.
        for stage, skip in ((self.up3, x3), (self.up2, x2), (self.up1, x1)):
            feat, edge, _ = stage(feat, skip, edge)
        seg = self.out(torch.cat([feat, edge], dim=1))
        seg = F.interpolate(seg, xx.size()[2:], mode='bilinear', align_corners=False)
        return [seg, self.fine_out(edge), middle]


class FuckLoss8(nn.Module):
    """Multi-class segmentation loss: CE on the main output, an
    edge-weighted CE term, and optional deep-supervision CE terms."""

    def __init__(self, w=0.4):
        super(FuckLoss8, self).__init__()
        # Named "dice"/"bce" for symmetry with FuckLoss6, but both are
        # cross-entropy here; names kept for caller compatibility.
        self.dice = nn.CrossEntropyLoss()
        self.bce = nn.CrossEntropyLoss(reduction='none')
        self.w = w

    def _resize_target(self, target, size):
        # Nearest-resize an integer label map via a temporary float tensor.
        resized = F.interpolate(target.unsqueeze(0).clone().float(), size, mode='nearest')
        return resized.squeeze(0).long()

    def edge_loss(self, edge, pred, target):
        """Per-pixel CE weighted x4 on edge pixels, x1 elsewhere.

        NOTE(review): self.bce(pred, gt) is (N, H, W) while ``edge``
        presumably keeps a channel dim (N, 1, H, W); for N > 1 the product
        would broadcast to (N, N, H, W) — verify edge's shape at call sites.
        """
        edge = F.interpolate(edge, pred.size()[2:], mode='nearest')
        gt = self._resize_target(target, pred.size()[2:])
        edge[edge != 0] = 4.0
        edge[edge == 0] = 1.0
        return (self.bce(pred, gt) * edge).mean()

    def forward(self, x, y):
        """x: [main_logits, edge_logits, ...]; y: [label_map, edge_map, ...]."""
        loss = self.dice(x[0], y[0]) + self.edge_loss(y[1], x[1], y[0]) * 0.4
        for i in range(2, len(y)):
            sup_gt = self._resize_target(y[0], x[i].size()[2:])
            loss = loss + self.dice(x[i], sup_gt) * self.w
        return loss


class Fuck8Small(nn.Module):
    """Fuck8 with the lightweight SimpleEncoder backbone.

    forward returns [main_logits, edge_logits, middle_features]; the
    per-stage deep-supervision maps are computed but discarded here.

    NOTE(review): ``pretrained`` is accepted for API symmetry but unused —
    SimpleEncoder has no pretrained weights.
    """

    def __init__(self, num_classes, pretrained=None):
        super(Fuck8Small, self).__init__()
        self.backbone = SimpleEncoder()
        chans = self.backbone.in_channels
        edge_channels = chans[0]
        self.middle = ASPPWithDIYDilate(chans[3], chans[3], [5, 11, 17])
        self.edge = nn.Sequential(
            ConvBNReLU(chans[0], edge_channels, 3, 1, 1),
            ConvBNReLU(edge_channels, edge_channels, 3, 1, 1)
        )
        self.up3 = FuckDecoder8(chans[2], edge_channels, num_classes)
        self.up2 = FuckDecoder8(chans[1], edge_channels, num_classes)
        self.up1 = FuckDecoder8(chans[0], edge_channels, num_classes)
        self.out = nn.Sequential(
            ConvBNReLU(chans[0] + edge_channels, chans[0], 3, 1, 1),
            ConvBNReLU(chans[0], chans[0], 3, 1, 1),
            nn.Conv2d(chans[0], num_classes, 3, 1, 1)
        )
        self.fine_out = nn.Conv2d(edge_channels, num_classes, 3, 1, 1)

    def forward(self, xx):
        x1, x2, x3, x4 = self.backbone(xx)
        edge = self.edge(x1)
        feat = self.middle(x4)
        middle = feat
        # Deep-supervision maps from each stage are intentionally discarded.
        for stage, skip in ((self.up3, x3), (self.up2, x2), (self.up1, x1)):
            feat, edge, _ = stage(feat, skip, edge)
        seg = self.out(torch.cat([feat, edge], dim=1))
        seg = F.interpolate(seg, xx.size()[2:], mode='bilinear', align_corners=False)
        return [seg, self.fine_out(edge), middle]


class Fuck8Res18(nn.Module):
    """Fuck8 with a ResNet-18 backbone instead of ResNet-34.

    forward returns [main_logits, edge_logits, middle_features]; the
    per-stage deep-supervision maps are computed but discarded here.
    """

    def __init__(self, num_classes, pretrained=None):
        super(Fuck8Res18, self).__init__()
        self.backbone = ResNet18WithoutMaxPool(pretrained=pretrained)
        chans = self.backbone.in_channels
        edge_channels = chans[0]
        self.middle = ASPPWithDIYDilate(chans[3], chans[3], [5, 11, 17])
        self.edge = nn.Sequential(
            ConvBNReLU(chans[0], edge_channels, 3, 1, 1),
            ConvBNReLU(edge_channels, edge_channels, 3, 1, 1)
        )
        self.up3 = FuckDecoder8(chans[2], edge_channels, num_classes)
        self.up2 = FuckDecoder8(chans[1], edge_channels, num_classes)
        self.up1 = FuckDecoder8(chans[0], edge_channels, num_classes)
        self.out = nn.Sequential(
            ConvBNReLU(chans[0] + edge_channels, chans[0], 3, 1, 1),
            ConvBNReLU(chans[0], chans[0], 3, 1, 1),
            nn.Conv2d(chans[0], num_classes, 3, 1, 1)
        )
        self.fine_out = nn.Conv2d(edge_channels, num_classes, 3, 1, 1)

    def forward(self, xx):
        x1, x2, x3, x4 = self.backbone(xx)
        edge = self.edge(x1)
        feat = self.middle(x4)
        middle = feat
        # Deep-supervision maps from each stage are intentionally discarded.
        for stage, skip in ((self.up3, x3), (self.up2, x2), (self.up1, x1)):
            feat, edge, _ = stage(feat, skip, edge)
        seg = self.out(torch.cat([feat, edge], dim=1))
        seg = F.interpolate(seg, xx.size()[2:], mode='bilinear', align_corners=False)
        return [seg, self.fine_out(edge), middle]
