import torch
import torch.nn as nn

from models.BasicBlock import ConvBlock3d


class UpConv(nn.Module):
    """Decoder block: upsample the coarse features by 2x, concatenate the skip
    connection, and fuse them with a conv block.

    Optionally provides a deep-supervision head: when ``output_size`` is given
    and the fused map's last spatial dim differs from it, the features are
    resized to ``output_size`` and projected to a single channel.
    """

    def __init__(self, input_channel, output_channel, output_size=None):
        """
        :param input_channel: channels of the concatenated (skip + upsampled) input
        :param output_channel: channels produced by the fusion conv block
        :param output_size: target spatial size of the deep-supervision output;
            ``None`` disables the deep-supervision head
        """
        super(UpConv, self).__init__()
        self.conv = ConvBlock3d(input_channel, output_channel, if_pooling=False, if_resnet=False)
        self.output_size = output_size
        self.output_channel = output_channel
        # Deep-supervision head: resize to the target size, then 1x1x1 conv to 1 channel.
        self.output_block = nn.Sequential()
        self.output_block.add_module("upsample", nn.Upsample(size=self.output_size))
        self.output_block.add_module("conv", nn.Conv3d(self.output_channel, 1, 1))

    def forward(self, skip, input_x):
        """
        :param skip: skip-connection features from the encoder
        :param input_x: features from the previous (coarser) decoder stage
        :return: ``(deep_supervision_output, fused_features)`` when the head
            fires, otherwise ``(fused_features, None)``
        """
        # Double every spatial dimension. Use the functional form instead of
        # building a throwaway nn.Upsample module on every call (same default
        # "nearest" mode).
        x = nn.functional.interpolate(input_x, size=[2 * i for i in input_x.shape[2:]])
        x = torch.cat((skip, x), dim=1)
        x = self.conv(x)
        # Fix: the default output_size=None previously crashed here with
        # TypeError on subscripting; treat None as "no deep supervision".
        if self.output_size is not None and self.output_size[-1] != x.shape[-1]:
            output = self.output_block(x)
            return output, x
        return x, None


class BRAVENet(nn.Module):
    """Two-resolution 3D segmentation network.

    Two parallel encoder paths process a large (downsampled) patch and a small
    patch; features from both paths are concatenated at every level and decoded
    by a shared decoder with deep-supervision heads.
    """

    def __init__(self, input_channel, output_channel, big_patch_size, final_active="sigmoid"):
        """
        :param input_channel: channels of each input patch
        :param output_channel: channels of the final segmentation map
        :param big_patch_size: spatial size of the large patch; the working
            resolution is half of this (the big patch is average-pooled by 2)
        :param final_active: "sigmoid", "softmax", or None for no activation
        :raises ValueError: if ``final_active`` is none of the above
        """
        super(BRAVENet, self).__init__()
        self.patch_size = [i // 2 for i in big_patch_size]
        self.final_active = final_active
        self.channel_list = [16, 32, 64, 128]
        self.loss_weights = [0.5, 0.25, 0.25]  # from top to bottom
        # Downsamples the big patch to the small patch's working resolution.
        # Hoisted here instead of being rebuilt on every forward call
        # (parameter-free, so the state_dict is unchanged).
        self.pool = nn.AvgPool3d(kernel_size=[2, 2, 2], stride=2)
        # Input normalization layers (their application in forward is currently
        # disabled; kept so existing checkpoints still load).
        self.BN1 = nn.BatchNorm3d(input_channel)
        self.BN2 = nn.BatchNorm3d(input_channel)
        # Encoder path A (big, downsampled patch)
        self.downConvA1 = ConvBlock3d(input_channel, self.channel_list[0])
        self.downConvA2 = ConvBlock3d(self.channel_list[0], self.channel_list[1])
        self.downConvA3 = ConvBlock3d(self.channel_list[1], self.channel_list[2])
        self.downConvA4 = ConvBlock3d(self.channel_list[2], self.channel_list[3], if_pooling=False)
        # Encoder path B (small patch)
        self.downConvB1 = ConvBlock3d(input_channel, self.channel_list[0])
        self.downConvB2 = ConvBlock3d(self.channel_list[0], self.channel_list[1])
        self.downConvB3 = ConvBlock3d(self.channel_list[1], self.channel_list[2])
        self.downConvB4 = ConvBlock3d(self.channel_list[2], self.channel_list[3], if_pooling=False)

        # Bottleneck: fuse the concatenated deepest features with 1x1x1 convs.
        self.upConv1 = nn.Sequential()
        self.upConv1.add_module("upConv1_1",
                                nn.Conv3d(2 * self.channel_list[-1], self.channel_list[-1], kernel_size=(1, 1, 1),
                                          stride=1))
        self.upConv1.add_module("upConv1_1_active", nn.ReLU())
        self.upConv1.add_module("upConv1_2",
                                nn.Conv3d(self.channel_list[-1], self.channel_list[-1], kernel_size=(1, 1, 1),
                                          stride=1))
        self.upConv1.add_module("upConv1_2_active", nn.ReLU())
        # Decoder: each UpConv consumes the concatenated skip from both paths.
        self.upConv2 = UpConv(2 * self.channel_list[-1], self.channel_list[-2], self.patch_size)
        self.upConv3 = UpConv(2 * self.channel_list[-2], self.channel_list[-3], self.patch_size)
        self.upConv4 = UpConv(2 * self.channel_list[-3], self.channel_list[-4], self.patch_size)

        self.outputConv = nn.Conv3d(self.channel_list[-4], output_channel, 1)

        # Fix: check None before calling .lower() — the original called
        # self.final_active.lower() first, so final_active=None raised
        # AttributeError and the None branch was unreachable.
        if self.final_active is None:
            self.active_function = nn.Sequential()
        elif self.final_active.lower() == "sigmoid":
            self.active_function = nn.Sigmoid()
        elif self.final_active.lower() == "softmax":
            self.active_function = nn.Softmax(dim=1)
        else:
            # ValueError subclasses Exception, so callers catching Exception still work.
            raise ValueError("wrong active function parameter")

    def forward(self, x1, x2):
        """
        :param x1: the bigger patch; average-pooled by 2 before encoding
        :param x2: the smaller patch, already at the working resolution
        :return: the top-level segmentation map after the final activation
        """
        x1 = self.pool(x1)
        # NOTE(review): BN1/BN2 input normalization is intentionally disabled here.

        residualA1, x1 = self.downConvA1(x1)
        residualA2, x1 = self.downConvA2(x1)
        residualA3, x1 = self.downConvA3(x1)
        residualA4, x1 = self.downConvA4(x1)

        residualB1, x2 = self.downConvB1(x2)
        residualB2, x2 = self.downConvB2(x2)
        residualB3, x2 = self.downConvB3(x2)
        residualB4, x2 = self.downConvB4(x2)

        # Fuse the two encoder paths level by level.
        skip1 = torch.cat([residualA1, residualB1], dim=1)
        skip2 = torch.cat([residualA2, residualB2], dim=1)
        skip3 = torch.cat([residualA3, residualB3], dim=1)
        skip4 = torch.cat([residualA4, residualB4], dim=1)

        up_x1 = self.upConv1(skip4)
        # deep_output1/2 are deep-supervision side outputs; only the top-level
        # output is currently returned (weights in self.loss_weights).
        deep_output1, up_x2 = self.upConv2(skip3, up_x1)
        deep_output2, up_x3 = self.upConv3(skip2, up_x2)
        deep_output3, _ = self.upConv4(skip1, up_x3)

        deep_output3 = self.outputConv(deep_output3)
        deep_output3 = self.active_function(deep_output3)
        return deep_output3