import torch
import torch.nn as nn
import torch.nn.functional as F
from base import BaseModel
torch.set_default_dtype(torch.float32)

def bn_act(num_features, act=True):
    """Build a BatchNorm2d over ``num_features`` channels, optionally + ReLU.

    Args:
        num_features: number of channels the batch norm normalizes over.
        act: when truthy, append an in-place ReLU after the batch norm.

    Returns:
        ``nn.Sequential(BatchNorm2d, ReLU)`` when ``act`` is truthy,
        otherwise a bare ``nn.BatchNorm2d`` module.
    """
    bn = nn.BatchNorm2d(num_features)
    # Idiomatic truthiness test (was the non-idiomatic `if act == True:`).
    if act:
        return nn.Sequential(bn, nn.ReLU(inplace=True))
    return bn

class Conv2dAuto(nn.Conv2d):
    """``nn.Conv2d`` that derives 'same'-style padding from its kernel size.

    After normal Conv2d construction, ``padding`` is overwritten with half
    the kernel extent per spatial dimension, so stride-1 convolutions keep
    the input's spatial resolution.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The base class has already normalized kernel_size to a 2-tuple.
        kh, kw = self.kernel_size
        self.padding = (kh // 2, kw // 2)

class ConvBlock(BaseModel):
    """Pre-activation conv unit: BatchNorm -> ReLU -> auto-padded Conv2d."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1):
        super().__init__()
        # ModuleDict keeps the state_dict keys 'module.bn_act.*' / 'module.conv.*'.
        self.module = nn.ModuleDict()
        self.module['bn_act'] = bn_act(in_channels)
        self.module['conv'] = Conv2dAuto(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride
        )

    def forward(self, x):
        # Same order as insertion order: normalize/activate, then convolve.
        x = self.module['bn_act'](x)
        return self.module['conv'](x)

class ResBlock(BaseModel):
    """Residual block: two conv stages plus a 1x1 projection shortcut.

    Args:
        in_channels: channels of the incoming tensor.
        out_channels: channels produced by the main path and the shortcut.
        kernel_size: kernel size of the two main-path convolutions.
        stride: stride of the first stage and of the shortcut (downsampling).
        first: when True, the first stage is a bare ``Conv2dAuto`` instead of
            a pre-activated ``ConvBlock`` — used for the stem so the raw
            network input is not batch-normalized/ReLU'd before the first conv.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, first=False):
        super().__init__()
        self.module = nn.ModuleDict()
        # Idiomatic truthiness test (was `if first == True:`).
        if first:
            self.module['conv_block_1'] = Conv2dAuto(
                in_channels=in_channels, out_channels=out_channels,
                kernel_size=kernel_size, stride=stride)
        else:
            self.module['conv_block_1'] = ConvBlock(
                in_channels=in_channels, out_channels=out_channels,
                kernel_size=kernel_size, stride=stride)
        self.module['conv_block_2'] = ConvBlock(
            in_channels=out_channels, out_channels=out_channels,
            kernel_size=kernel_size, stride=1)
        # Projection shortcut matches channels/stride so the residual sum is
        # shape-compatible; batch norm without activation on the shortcut.
        self.module['shortcut'] = nn.Sequential(
            Conv2dAuto(in_channels=in_channels, out_channels=out_channels,
                       kernel_size=1, stride=stride),
            bn_act(out_channels, False)
        )

    def forward(self, x):
        out = self.module['conv_block_1'](x)
        out = self.module['conv_block_2'](out)

        shortcut = self.module['shortcut'](x)

        # Functional add instead of in-place `out += shortcut`: numerically
        # identical, but avoids mutating an autograd intermediate in place.
        return out + shortcut

class UpBlock(BaseModel):
    """Decoder stage: 2x bilinear upsample, skip concatenation, then ResBlock.

    NOTE(review): the ``kernel_size`` and ``stride`` parameters are accepted
    but unused — the inner ResBlock is always built with kernel_size=3,
    stride=1. Preserved as-is; confirm whether that is intended.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1):
        super().__init__()
        modules = nn.ModuleDict()
        modules['upsample'] = nn.Upsample(
            scale_factor=2, mode='bilinear', align_corners=True)
        modules['res_block'] = ResBlock(
            in_channels=in_channels, out_channels=out_channels,
            kernel_size=3, stride=1)
        self.module = modules

    def forward(self, x, x_skip):
        # Upsample the coarse decoder tensor, then fuse with the encoder skip
        # along the channel axis before the residual block.
        upsampled = self.module['upsample'](x)
        merged = torch.cat([upsampled, x_skip], dim=1)
        return self.module['res_block'](merged)

class ResUnet2D(BaseModel):
    """Residual U-Net for 2-D inputs.

    Stem + four stride-2 residual encoder blocks, a two-conv bridge at the
    bottleneck, four upsampling decoder blocks with encoder skip
    connections, and a sigmoid-activated 1x1 output head.

    Args:
        enc_size: sequence of channel widths; index 0 is the number of
            input channels, the remaining entries are the encoder stage
            widths. Any sequence type is accepted.
        n_classes: number of output channels produced by the head.
    """

    def __init__(self, enc_size=(1, 16, 32, 64, 128, 256), n_classes=1):
        super().__init__()
        # Copy to a list: fixes the shared-mutable-default pitfall of the
        # old `enc_size=[...]` default and lets callers pass any sequence.
        enc_size = list(enc_size)

        # Stem: operates on the raw input, so no pre-activation (first=True).
        self.res_block_0 = ResBlock(in_channels=enc_size[0], out_channels=enc_size[1],
                                    kernel_size=3, stride=1, first=True)
        enc_size = enc_size[1:]  # keep only the stage widths from here on
        # Encoder: each stride-2 block halves the spatial resolution.
        self.res_block_1 = ResBlock(in_channels=enc_size[0], out_channels=enc_size[1], kernel_size=3, stride=2)
        self.res_block_2 = ResBlock(in_channels=enc_size[1], out_channels=enc_size[2], kernel_size=3, stride=2)
        self.res_block_3 = ResBlock(in_channels=enc_size[2], out_channels=enc_size[3], kernel_size=3, stride=2)
        self.res_block_4 = ResBlock(in_channels=enc_size[3], out_channels=enc_size[4], kernel_size=3, stride=2)

        # Bridge at the bottleneck: two pre-activated convs at constant width.
        self.bridge = nn.Sequential(
            ConvBlock(enc_size[-1], enc_size[-1]),
            ConvBlock(enc_size[-1], enc_size[-1])
        )

        # Decoder: each UpBlock consumes the previous decoder output
        # concatenated with the matching encoder skip, hence the computed
        # in-channel counts (sum of neighbouring widths, see the classmethod).
        self.up_block_1 = UpBlock(self.calc_in_channels_up_block(enc_size[4], enc_size), enc_size[4])
        self.up_block_2 = UpBlock(self.calc_in_channels_up_block(enc_size[3], enc_size), enc_size[3])
        self.up_block_3 = UpBlock(self.calc_in_channels_up_block(enc_size[2], enc_size), enc_size[2])
        self.up_block_4 = UpBlock(self.calc_in_channels_up_block(enc_size[1], enc_size), enc_size[1])

        # Head: 1x1 conv to n_classes channels, squashed to (0, 1).
        self.output = nn.Sequential(
            Conv2dAuto(in_channels=enc_size[1], out_channels=n_classes, kernel_size=1, stride=1),
            nn.Sigmoid()
        )

    def forward(self, x):
        # Encoder path (widths shown for the default config).
        d0 = self.res_block_0(x)   # 16, full resolution
        d1 = self.res_block_1(d0)  # 32,  1/2
        d2 = self.res_block_2(d1)  # 64,  1/4
        d3 = self.res_block_3(d2)  # 128, 1/8
        d4 = self.res_block_4(d3)  # 256, 1/16

        b = self.bridge(d4)  # 256

        # Decoder path with skip connections (cat widths -> output width).
        u1 = self.up_block_1(b, d3)   # cat(256, 128) -> 256
        u2 = self.up_block_2(u1, d2)  # cat(256, 64)  -> 128
        u3 = self.up_block_3(u2, d1)  # cat(128, 32)  -> 64
        u4 = self.up_block_4(u3, d0)  # cat(64, 16)   -> 32

        return self.output(u4)

    @classmethod
    def calc_in_channels_up_block(cls, out_channels, dec_size):
        """Input channel count for the UpBlock that produces ``out_channels``.

        The stage receives the previous decoder output concatenated with
        the same-scale encoder skip, so the input width is the sum of the
        widths adjacent to ``out_channels`` in ``dec_size``, with indices
        clamped at both ends of the sequence. Assumes ``dec_size`` entries
        are unique (``index`` finds the first match).
        """
        idx = dec_size.index(out_channels)
        lo = max(idx - 1, 0)
        hi = min(idx + 1, len(dec_size) - 1)
        return dec_size[lo] + dec_size[hi]
