import torch
import torch.nn as nn


class febnet1(nn.Module):
    """Feature-extraction branch: a 15-stage conv+ReLU stack (1 -> 64 channels)
    followed by a plain 3x3 conv head.

    Fixes over the original:
    - Every stage now owns its parameters.  The original reused the same
      ``CBR``/``dCBR`` modules at several depths in ``forward``, which
      silently tied those layers' weights together.
    - The "dilated" stages (originally ``dCBR`` at f2, f5, f9, f12) now
      actually use ``dilation=2`` with ``padding=2`` (spatial size is
      preserved); the original passed ``dilation=1``, making them ordinary
      convolutions despite the name.

    Input:  (N, 1, H, W) tensor.
    Output: (N, 64, H, W) tensor (H, W unchanged throughout).
    """

    # Indices (within the 14 stages after the stem) that use a dilated conv,
    # matching the original call order: dCBR produced f2, f5, f9 and f12.
    _DILATED = frozenset({0, 3, 7, 10})

    def __init__(self):
        super(febnet1, self).__init__()
        # Stem: 1 -> 64 channels.
        stages = [nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, padding=1),
            nn.ReLU())]
        # 14 further 64 -> 64 stages, each a fresh module with its own weights.
        for i in range(14):
            if i in self._DILATED:
                conv = nn.Conv2d(64, 64, kernel_size=3, padding=2, dilation=2)
            else:
                conv = nn.Conv2d(64, 64, kernel_size=3, padding=1)
            stages.append(nn.Sequential(conv, nn.ReLU()))
        self.features = nn.Sequential(*stages)
        # Final plain conv, no activation (as in the original).
        self.conv1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1)

    def forward(self, x):
        """Run the 15-stage extractor plus the conv head on *x*."""
        return self.conv1(self.features(x))


class febnet2(nn.Module):
    """15-stage conv+ReLU feature extractor (3 -> 64 channels) followed by a
    1x1 projection head.

    Fix over the original: ``layers.append(self.CR)`` appended the *same*
    module 14 times, so all 14 inner stages shared one set of weights.
    Each stage is now a freshly constructed module with its own parameters.

    Input:  (N, 3, H, W) tensor.
    Output: (N, 64, H, W) tensor (spatial size preserved).
    """

    def __init__(self):
        super(febnet2, self).__init__()
        # Stem: 3 -> 64 channels.
        layers = [nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, padding=1),
            nn.ReLU())]
        # 14 further 64 -> 64 stages, each with independent weights.
        for _ in range(14):
            layers.append(nn.Sequential(
                nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1),
                nn.ReLU()))
        self.layers = nn.Sequential(*layers)
        # 1x1 conv head, no activation.
        self.CB1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=1, padding=0)

    def forward(self, x):
        """Apply the conv stack, then the 1x1 head."""
        return self.CB1(self.layers(x))


class dudenet(nn.Module):
    """Two-branch denoiser.

    Concatenates the 64-channel outputs of two ``febnet1`` branches
    (128 channels), BN+ReLU's and fuses them to one channel (``CB2``),
    then concatenates that map with the input (2 channels), BN+ReLU's
    and fuses again to the final single-channel output (``CB3``).

    NOTE(review): both branches are ``febnet1`` instances even though a
    ``febnet2`` class exists — presumably intentional (``febnet2`` expects
    3 input channels while this network takes 1), but worth confirming.
    """

    def __init__(self):
        super(dudenet, self).__init__()
        self.feb1 = febnet1()
        self.feb2 = febnet1()
        self.CB2 = nn.Conv2d(in_channels=128, out_channels=1, kernel_size=1, padding=0)
        self.CB3 = nn.Conv2d(in_channels=2, out_channels=1, kernel_size=1, padding=0)
        self.BR1 = nn.Sequential(nn.BatchNorm2d(128), nn.ReLU())
        self.BR2 = nn.Sequential(nn.BatchNorm2d(2), nn.ReLU())

        # DnCNN-style initialisation: conv weights and BN scales drawn from
        # N(0, sqrt(2 / (9 * 64))); BN scales with magnitude below clip_b
        # are pushed out to +/-clip_b, and BN running variance reset to 0.01.
        std = (2 / (9.0 * 64)) ** 0.5
        clip_b = 0.025
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                mod.weight.data.normal_(0, std)
            if isinstance(mod, nn.BatchNorm2d):
                mod.weight.data.normal_(0, std)
                w = mod.weight.data
                # element-wise: [0, clip_b) -> clip_b, (-clip_b, 0) -> -clip_b
                w[(w >= 0) & (w < clip_b)] = clip_b
                w[(w > -clip_b) & (w < 0)] = -clip_b
                mod.running_var.fill_(0.01)

    def forward(self, x):
        branches = torch.cat([self.feb1(x), self.feb2(x)], dim=1)
        fused = self.CB2(self.BR1(branches))
        combined = torch.cat([x, fused], dim=1)
        return self.CB3(self.BR2(combined))
