import torch
import torch.nn as nn

from utils.LogUtil import my_logger


#  https://arxiv.org/pdf/1606.06650.pdf
class UNet3D(nn.Module):
    """3D U-Net (https://arxiv.org/pdf/1606.06650.pdf).

    Encoder: three DownConv3D stages (64/128/256 channels); downConv2/3 each
    max-pool 2x before convolving, so the spatial size halves twice.
    Bottom: one more 2x pool, then a 512-channel stage that transpose-convs
    back up.  Decoder: skip connections are concatenated channel-wise before
    each UpConv3D, hence the 768/384/192 input widths (skip + upsampled).

    Args:
        input_channel: number of channels of the input volume.
        output_channel: number of output classes / channels.
        if_activate: when True, apply softmax (multi-class) or sigmoid
            (single-channel) to the final 1x1x1 conv output.

    Returns (from forward): a tuple ``(result, final_conv_result)`` where
    ``result`` is the (optionally activated) output and
    ``final_conv_result`` is always the raw logits.
    """

    def __init__(self, input_channel, output_channel, if_activate=True):
        super(UNet3D, self).__init__()
        self.if_activate = if_activate
        self.output_channel = output_channel

        # All conv stages use the same padding of 1 (3x3x3 convs keep the
        # spatial size).  This replaces the former padding_one..padding_five
        # locals, which were all set to 1.
        padding = 1

        # Encoder.  The first stage does NOT pool; downConv2/3 pool first.
        self.downConv1 = DownConv3D(input_channel, 64, padding, need_pooling=False)
        self.downConv2 = DownConv3D(64, 128, padding)
        self.downConv3 = DownConv3D(128, 256, padding)

        self.maxpooling3d = nn.MaxPool3d(kernel_size=2)

        # Bottom stage: convs at 512 channels, then 2x transpose-conv up.
        self.bottomConv = UpConv3D(256, 512, padding)

        # Decoder: in_channels = skip channels + upsampled channels.
        self.upConv1 = UpConv3D(768, 256, padding)          # 256 + 512
        self.upConv2 = UpConv3D(384, 128, padding)          # 128 + 256
        self.upConv3 = UpConv3D(192, 64, padding, if_up_conv=False)  # 64 + 128

        self.finalConv = nn.Conv3d(in_channels=64, out_channels=output_channel, kernel_size=1, stride=1)
        self.softMax = nn.Softmax(dim=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Encoder path (downConv2/3 max-pool before convolving; downConv1
        # is built with need_pooling=False and does not).
        conv1_result = self.downConv1(x)
        conv2_result = self.downConv2(conv1_result)
        conv3_result = self.downConv3(conv2_result)

        conv3_result_pooling = self.maxpooling3d(conv3_result)
        down_conv_result = self.bottomConv(conv3_result_pooling)

        # Skip connection: bottomConv's transpose conv restored the spatial
        # size of conv3_result, so the two can be concatenated on channels.
        x = torch.cat([conv3_result, down_conv_result], dim=1)
        upconv1_result = self.upConv1(x)

        upconv1_result = torch.cat([conv2_result, upconv1_result], dim=1)
        upconv2_result = self.upConv2(upconv1_result)

        upconv2_result = torch.cat([conv1_result, upconv2_result], dim=1)
        upconv3_result = self.upConv3(upconv2_result)

        final_conv_result = self.finalConv(upconv3_result)
        if self.if_activate:
            if self.output_channel > 1:
                result = self.softMax(final_conv_result)
            elif self.output_channel == 1:
                result = self.sigmoid(final_conv_result)
            else:
                my_logger.error("The number of output channel is wrong")
                result = None
        else:
            result = final_conv_result
        # Raw logits are always returned alongside, e.g. for loss functions
        # that expect unactivated outputs.
        return result, final_conv_result


class DownConv3D(nn.Module):
    """Encoder stage of the 3D U-Net: optional 2x max-pool, then two
    conv -> BatchNorm -> ReLU layers.

    Args:
        input_channel: channels of the incoming volume.
        out_channel: channels produced by both convolutions.
        padding: spatial padding for both convolutions.
        conv_kernel_size: kernel size of both convolutions (default 3).
        conv_stride: stride of both convolutions (default 1).
        pooling_kernel_size: max-pool kernel/stride (default 2).
        need_pooling: when True (default), max-pool BEFORE convolving; the
            first stage of the network passes False.
    """

    def __init__(self, input_channel, out_channel, padding, conv_kernel_size=3, conv_stride=1, pooling_kernel_size=2,
                 need_pooling=True):
        super(DownConv3D, self).__init__()
        self.need_pooling = need_pooling
        # bias=False: each conv is immediately followed by BatchNorm3d, whose
        # affine shift makes a conv bias redundant.  This also matches the
        # convention already used by UpConv3D.
        # NOTE(review): this drops conv1.bias/conv2.bias from the state_dict;
        # checkpoints saved before this change need load_state_dict(strict=False).
        self.conv1 = nn.Conv3d(in_channels=input_channel, out_channels=out_channel, kernel_size=conv_kernel_size,
                               stride=conv_stride, padding=padding, bias=False)
        self.BN1 = nn.BatchNorm3d(out_channel)
        self.Relu = nn.ReLU()
        self.conv2 = nn.Conv3d(in_channels=out_channel, out_channels=out_channel, kernel_size=conv_kernel_size,
                               stride=conv_stride, padding=padding, bias=False)
        self.BN2 = nn.BatchNorm3d(out_channel)
        self.max_pooling1 = nn.MaxPool3d(kernel_size=pooling_kernel_size)

    def forward(self, x):
        # Pool first (halves each spatial dimension), then convolve.
        if self.need_pooling:
            x = self.max_pooling1(x)
        x = self.conv1(x)
        x = self.BN1(x)
        x = self.Relu(x)
        x = self.conv2(x)
        x = self.BN2(x)
        x = self.Relu(x)
        return x


class UpConv3D(nn.Module):
    """Decoder stage of the 3D U-Net: two conv -> BatchNorm -> ReLU layers,
    optionally followed by a 2x transpose convolution (upsampling).

    Args:
        input_channel: channels of the incoming (usually concatenated) volume.
        out_channel: channels produced by the convolutions and, when enabled,
            kept by the transpose conv (unlike the paper, channels are NOT
            halved during upsampling).
        padding: spatial padding for both 3x3x3 convolutions.
        conv_kernel_size: kernel size of both convolutions (default 3).
        conv_stride: stride of both convolutions (default 1).
        up_conv_kernel_size: transpose-conv kernel size (default 2; with
            stride 2 this exactly doubles each spatial dimension).
        if_up_conv: when False (last decoder stage), skip the transpose conv.
    """

    def __init__(self, input_channel, out_channel, padding, conv_kernel_size=3, conv_stride=1, up_conv_kernel_size=2,
                 if_up_conv=True):
        super(UpConv3D, self).__init__()
        self.if_up_conv = if_up_conv

        self.conv1 = nn.Conv3d(in_channels=input_channel, out_channels=out_channel, kernel_size=conv_kernel_size,
                               stride=conv_stride, padding=padding, bias=False)
        self.BN1 = nn.BatchNorm3d(out_channel)
        self.Relu = nn.ReLU()
        self.conv2 = nn.Conv3d(in_channels=out_channel, out_channels=out_channel, kernel_size=conv_kernel_size,
                               stride=conv_stride, padding=padding, bias=False)
        self.BN2 = nn.BatchNorm3d(out_channel)
        # Only build the transpose conv when it will actually be used;
        # previously it was always registered, leaving dead parameters in the
        # model/optimizer when if_up_conv=False.
        # NOTE(review): old checkpoints of an if_up_conv=False stage contain
        # (unused) up_conv_trans1 weights; load with strict=False.
        if if_up_conv:
            self.up_conv_trans1 = nn.ConvTranspose3d(in_channels=out_channel, out_channels=out_channel,
                                                     kernel_size=up_conv_kernel_size, stride=2)
        else:
            self.up_conv_trans1 = None

    def forward(self, x):
        x = self.conv1(x)
        x = self.BN1(x)
        x = self.Relu(x)
        x = self.conv2(x)
        x = self.BN2(x)
        x = self.Relu(x)
        if self.if_up_conv:
            # Doubles each spatial dimension (kernel 2, stride 2).
            x = self.up_conv_trans1(x)
        return x
