import torch
import torch.nn as nn


# V-Net (Milletari et al.): https://arxiv.org/abs/1606.04797
# NOTE(review): the previously linked 1606.06650 is the 3-D U-Net paper; the
# architecture below (PReLU, residual adds, strided-conv down/up-sampling,
# 5x5x5 kernels) follows V-Net.
class VNet3D(nn.Module):
    """V-Net style 3-D encoder/decoder network for volumetric segmentation.

    The encoder halves the spatial resolution four times (so every spatial
    dimension of the input should be divisible by 16); the decoder mirrors it
    with transposed convolutions and skip connections.

    Args:
        input_channel: channels of the input volume, shape (N, C, D, H, W).
        output_channel: number of output classes / logit channels.
        if_softmax: when True (default), the first returned tensor is the
            softmax of the logits over the class (channel) axis; when False
            it is the raw logits.

    ``forward`` returns ``(final_output, one_conv)`` where ``one_conv`` is
    always the raw 1x1x1-convolution logits.
    """

    def __init__(self, input_channel, output_channel, if_softmax=True):
        super(VNet3D, self).__init__()
        self.if_softmax = if_softmax
        self.output_channel = output_channel

        # Encoder: each stage returns (skip_features, downsampled_features).
        self.downVNetConv1 = DownVNetConv(input_channel, 16, 1, padding=2)
        self.downVNetConv2 = DownVNetConv(16, 32, 2, padding=2)
        self.downVNetConv3 = DownVNetConv(32, 64, 3, padding=2)
        self.downVNetConv4 = DownVNetConv(64, 128, 3, padding=2)

        # Deepest stage: convolves at the lowest resolution, then upsamples.
        self.bottomVNetConv = BottomVNetConv(128, 256, 3, padding=2)

        # Decoder: in_channels = skip channels + upsampled channels.
        self.upVNetConv1 = UpVNetConv(256, 256, 3, padding=2)
        self.upVNetConv2 = UpVNetConv(192, 128, 3, padding=2)
        self.upVNetConv3 = UpVNetConv(96, 64, 2, padding=2)
        self.upVNetConv4 = UpVNetConv(48, 32, 1, padding=2, if_upConv=False)

        # 1x1x1 convolution mapping decoder features to per-class logits.
        self.oneConv = nn.Conv3d(in_channels=32, out_channels=output_channel, kernel_size=1, stride=1)

        # BUG FIX: was nn.Softmax(output_channel), i.e. dim=output_channel,
        # which normalized over an arbitrary spatial axis. Class scores live
        # on dim 1 of an (N, C, D, H, W) tensor.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        forwarding_1, down_conv1 = self.downVNetConv1(x)
        forwarding_2, down_conv2 = self.downVNetConv2(down_conv1)
        forwarding_3, down_conv3 = self.downVNetConv3(down_conv2)
        forwarding_4, down_conv4 = self.downVNetConv4(down_conv3)

        up_conv0 = self.bottomVNetConv(down_conv4)

        up_conv1 = self.upVNetConv1(forwarding_4, up_conv0)
        up_conv2 = self.upVNetConv2(forwarding_3, up_conv1)
        up_conv3 = self.upVNetConv3(forwarding_2, up_conv2)
        up_conv4 = self.upVNetConv4(forwarding_1, up_conv3)
        one_conv = self.oneConv(up_conv4)
        # BUG FIX: honour if_softmax (it was stored but never consulted).
        final_output = self.softmax(one_conv) if self.if_softmax else one_conv
        return final_output, one_conv


class BottomVNetConv(nn.Module):
    """Deepest V-Net stage: a residual conv stack followed by 2x upsampling.

    Runs ``conv_num`` 3-D convolutions (first one maps input_channel ->
    out_channel, the rest keep out_channel), adds a channel-duplicated copy
    of the input as a residual, then upsamples with a stride-2 transposed
    convolution that halves the channel count.

    NOTE(review): the residual ``cat([x, x])`` assumes
    ``out_channel == 2 * input_channel`` — true for the single call site
    ``BottomVNetConv(128, 256, ...)``; other ratios would fail to add.
    The single PReLU (one shared learnable slope) is reused after the first
    conv and after the upconv, matching the original implementation.
    """

    def __init__(self, input_channel, out_channel, conv_num, padding, conv_kernel_size=5, conv_stride=1):
        super(BottomVNetConv, self).__init__()

        # First convolution: changes the channel count.
        self.conv1 = nn.Conv3d(in_channels=input_channel, out_channels=out_channel,
                               kernel_size=conv_kernel_size, stride=conv_stride, padding=padding)

        # Remaining conv_num - 1 convolutions keep the channel count.
        self.restConv = nn.Sequential()
        for idx in range(1, conv_num):
            layer = nn.Conv3d(in_channels=out_channel, out_channels=out_channel,
                              kernel_size=conv_kernel_size, stride=conv_stride, padding=padding)
            self.restConv.add_module("conv" + str(idx), layer)
            self.restConv.add_module("PReLU" + str(idx), nn.PReLU())

        # Stride-2 transposed conv: doubles spatial size, halves channels.
        self.upConv = nn.ConvTranspose3d(in_channels=out_channel, out_channels=out_channel // 2,
                                         kernel_size=2, stride=2)
        self.PReLU = nn.PReLU()

    def forward(self, x):
        features = self.PReLU(self.conv1(x))
        features = self.restConv(features)
        # Residual: duplicate the input along channels to match out_channel.
        residual = torch.cat([x, x], dim=1) + features
        return self.PReLU(self.upConv(residual))


class DownVNetConv(nn.Module):
    """V-Net encoder stage: residual conv stack plus strided downsampling.

    Runs ``conv_num`` 3-D convolutions (first maps input_channel ->
    out_channel, the rest keep out_channel), adds the input back as a
    residual, then downsamples with a stride-2 2x2x2 convolution.

    forward(x) returns ``(skip, down)``: the full-resolution residual sum
    (used later as the decoder skip connection) and its downsampled form.

    NOTE(review): the residual path assumes either input_channel == 1
    (broadcast add) or ``out_channel == 2 * input_channel`` (channel
    duplication) — both hold for every call site in this file. The single
    PReLU (shared learnable slope) is reused after conv1 and after the
    downsampling conv, matching the original implementation.
    """

    def __init__(self, input_channel, out_channel, conv_num, padding, conv_kernel_size=5, conv_stride=1):
        super(DownVNetConv, self).__init__()

        # First convolution: changes the channel count.
        self.conv1 = nn.Conv3d(in_channels=input_channel, out_channels=out_channel,
                               kernel_size=conv_kernel_size, stride=conv_stride, padding=padding)

        # Remaining conv_num - 1 convolutions keep the channel count.
        self.restConv = nn.Sequential()
        for idx in range(1, conv_num):
            layer = nn.Conv3d(in_channels=out_channel, out_channels=out_channel,
                              kernel_size=conv_kernel_size, stride=conv_stride, padding=padding)
            self.restConv.add_module("conv" + str(idx), layer)
            self.restConv.add_module("PReLU" + str(idx), nn.PReLU())

        # Stride-2 2x2x2 conv halves every spatial dimension.
        self.downConv = nn.Conv3d(in_channels=out_channel, out_channels=out_channel, kernel_size=2, stride=2)
        self.PReLU = nn.PReLU()

    def forward(self, x):
        features = self.restConv(self.PReLU(self.conv1(x)))
        # Residual add: a 1-channel input broadcasts across out_channel;
        # otherwise duplicate the input channels to match.
        skip = x if x.shape[1] == 1 else torch.cat([x, x], dim=1)
        skip = skip + features
        down = self.PReLU(self.downConv(skip))
        return skip, down


class UpVNetConv(nn.Module):
    """V-Net decoder stage: concat skip + upsampled, conv stack, residual.

    forward(left_x, down_x) concatenates the encoder skip ``left_x`` with the
    upsampled features ``down_x``, runs ``conv_num`` convolutions (first maps
    input_channel -> out_channel), adds ``down_x`` back as a residual, and —
    unless ``if_upConv=False`` — upsamples with a stride-2 transposed conv
    that halves the channel count.

    BUG FIX: the residual was unconditionally ``down_x + rest_conv``, which
    raises a broadcast error for the first decoder stage
    (``UpVNetConv(256, 256, ...)``: down_x has 128 channels, rest_conv 256).
    When the stage doubles the channel count we now duplicate ``down_x``
    along the channel axis, mirroring the ``cat([x, x])`` convention used by
    DownVNetConv/BottomVNetConv. Matched-channel stages are unchanged.
    """

    def __init__(self, input_channel, out_channel, conv_num, padding, conv_kernel_size=5, conv_stride=1,
                 if_upConv=True):
        super(UpVNetConv, self).__init__()
        self.ifUpConv = if_upConv
        # First convolution: maps the concatenated channels to out_channel.
        self.conv1 = nn.Conv3d(in_channels=input_channel, out_channels=out_channel, kernel_size=conv_kernel_size,
                               stride=conv_stride, padding=padding)
        self.PReLU = nn.PReLU()
        # Remaining conv_num - 1 convolutions keep the channel count.
        self.restConv = nn.Sequential()
        for i in range(conv_num - 1):
            self.restConv.add_module("conv" + str(i + 1), nn.Conv3d(in_channels=out_channel, out_channels=out_channel,
                                                                    kernel_size=conv_kernel_size,
                                                                    stride=conv_stride, padding=padding))
            self.restConv.add_module("PReLU" + str(i + 1), nn.PReLU())

        # Stride-2 transposed conv: doubles spatial size, halves channels.
        self.upConv = nn.ConvTranspose3d(in_channels=out_channel, out_channels=out_channel // 2, kernel_size=2,
                                         stride=2)

    def forward(self, left_x, down_x):
        x = torch.cat([left_x, down_x], dim=1)
        conv1 = self.conv1(x)
        conv1 = self.PReLU(conv1)
        rest_conv = self.restConv(conv1)
        if down_x.shape[1] == rest_conv.shape[1]:
            x_sum = down_x + rest_conv
        else:
            # First decoder stage: out_channel == 2 * down_x channels, so
            # duplicate down_x along the channel axis before the residual add.
            x_sum = torch.cat([down_x, down_x], dim=1) + rest_conv
        if self.ifUpConv:
            output = self.upConv(x_sum)
            output = self.PReLU(output)
            return output
        else:
            return x_sum
