import torch
import torch.nn as nn
import torch.nn.functional as F
# from utils import init_weights


class DownBlock(nn.Module):
    """Encoder stage of the U-Net: optional 2x max-pooling followed by
    ``nConv`` Conv2d(+BatchNorm)+ReLU layers.

    The very first stage (``is_first_block=True``) skips the pooling so the
    input resolution is preserved.
    """

    def __init__(self, in_ch, out_ch, is_first_block=False, nConv=2, kernel_size=3, stride=1, padding=1, do_batchnorm=True):
        super(DownBlock, self).__init__()

        layers = [] if is_first_block else [nn.MaxPool2d(kernel_size=2)]
        ch = in_ch
        for _ in range(nConv):
            layers.append(nn.Conv2d(ch, out_ch, kernel_size, stride, padding))
            if do_batchnorm:
                layers.append(nn.BatchNorm2d(out_ch))
            layers.append(nn.ReLU(inplace=True))
            ch = out_ch  # subsequent convs keep the channel count

        self.block = nn.Sequential(*layers)

    def forward(self, x):
        return self.block(x)


class UpBlock(nn.Module):
    """Decoder stage of the U-Net.

    The incoming feature map is bilinearly upsampled by 2, concatenated with
    the matching skip connection along the channel axis, then refined by
    ``nConv`` Conv2d(+BatchNorm)+ReLU layers whose channel count halves per
    step but never drops below ``out_ch``.

    Args:
        in_ch: channels of the concatenated (upsampled + skip) tensor.
        out_ch: minimum/final channel count produced by the block.
        nConv: number of conv layers.
        kernel_size, stride, padding: Conv2d geometry.
        do_batchnorm: insert BatchNorm2d after every conv when True.
    """

    def __init__(self, in_ch, out_ch, nConv=2, kernel_size=3, stride=1, padding=1, do_batchnorm=True):
        super(UpBlock, self).__init__()

        # Halve the channel count per conv, clamped from below at out_ch.
        out_chs = []
        ch = in_ch
        for _ in range(nConv):
            ch = ch // 2
            out_chs.append(max(ch, out_ch))

        block = []
        for n in range(nConv):
            block += [nn.Conv2d(in_ch, out_chs[n], kernel_size, stride, padding)]
            if do_batchnorm:
                block += [nn.BatchNorm2d(out_chs[n])]
            block += [nn.ReLU(inplace=True)]
            in_ch = out_chs[n]

        self.block = nn.Sequential(*block)

    def forward(self, x, x_skip):
        """Upsample ``x`` by 2, concatenate ``x_skip``, and apply the convs."""
        # F.interpolate(..., mode='bilinear', align_corners=True) is the
        # functional equivalent of nn.UpsamplingBilinear2d; it avoids
        # constructing a new Module object on every forward pass.
        x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
        x = torch.cat([x, x_skip], dim=1)
        return self.block(x)


class UNet(nn.Module):
    """Vanilla U-Net built from DownBlock / UpBlock stages.

    ``nBlock`` encoder stages (the first one without pooling) are followed
    by ``nBlock - 1`` decoder stages that consume the stored skip features,
    and a final 1x1 convolution (no bias) maps to ``out_ch`` channels.
    """

    def __init__(self, in_ch=3, out_ch=1, nConv=2, nBlock=5, ch=64, kernel_size=3, stride=1, padding=1, **kwargs):
        super(UNet, self).__init__()
        self.nBlock = nBlock
        self.out_ch = out_ch

        # Per-stage channel tables covering encoder + decoder, e.g. with
        # ch=64, nBlock=5:
        #   in : [3,  64, 128, 256, 512, 1024, 512, 256, 128, 64]
        #   out: [64, 128, 256, 512, 512, 256,  128, 64,  64,  out_ch]
        in_chs = [in_ch] + [ch*2**n for n in range(nBlock-1)] + [ch*2**n for n in range(nBlock-1,0,-1)] + [ch]
        out_chs = [ch*2**n for n in range(nBlock-1)] + [ch*2**n for n in range(nBlock-2,-1,-1)] + [ch, out_ch]

        self.downBlocks = nn.ModuleList([
            DownBlock(in_chs[i], out_chs[i], is_first_block=(i == 0), nConv=nConv,
                      kernel_size=kernel_size, stride=stride, padding=padding)
            for i in range(nBlock)
        ])
        self.upBlocks = nn.ModuleList([
            UpBlock(in_chs[i + nBlock], out_chs[i + nBlock], nConv=nConv,
                    kernel_size=kernel_size, stride=stride, padding=padding)
            for i in range(nBlock - 1)
        ])
        self.last_conv = nn.Conv2d(in_chs[-1], out_chs[-1], 1, bias=False)

        # Report the number of parameters.
        self.param_count = sum(p.numel() for p in self.parameters())
        print(f'U-Net param_count: {self.param_count}')

    def forward(self, x):
        # Encoder: keep every stage's output except the bottleneck as a skip.
        skips = []
        for i, downBlock in enumerate(self.downBlocks):
            x = downBlock(x)
            if i < self.nBlock - 1:
                skips.append(x)
        # Decoder: consume the skips in reverse (deepest first).
        for upBlock in self.upBlocks:
            x = upBlock(x, skips.pop())
        return self.last_conv(x)


class Self_Attn(nn.Module):
    """SAGAN-style self-attention layer for 2-D feature maps.

    Attention is computed over all spatial positions (N = H*W) and the
    attended features are blended back into the input via a learned,
    zero-initialised scale ``gamma`` — the layer starts as an identity.
    """

    def __init__(self, in_dim, activation):
        super(Self_Attn, self).__init__()
        self.chanel_in = in_dim
        # NOTE(review): `activation` is stored but never used in forward().
        self.activation = activation

        # 1x1 projections; query/key are reduced to in_dim//8 channels.
        self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        self.gamma = nn.Parameter(torch.zeros(1))

        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        """
        Args:
            x: input feature maps of shape (B, C, H, W)
        Returns:
            out: gamma * attended-features + x, same shape as ``x``
            attention: (B, N, N) attention matrix, N = H * W
        """
        batch, channels, height, width = x.size()
        n = height * width
        queries = self.query_conv(x).view(batch, -1, n).permute(0, 2, 1)  # B x N x C'
        keys = self.key_conv(x).view(batch, -1, n)                        # B x C' x N
        attention = self.softmax(torch.bmm(queries, keys))                # B x N x N
        values = self.value_conv(x).view(batch, -1, n)                    # B x C x N

        out = torch.bmm(values, attention.permute(0, 2, 1))
        out = out.view(batch, channels, height, width)

        # Residual blend; gamma is initialised to 0 so initially out == x.
        out = self.gamma * out + x
        return out, attention


class UNet_attn(nn.Module):
    """U-Net variant with SAGAN self-attention inserted in the decoder.

    Identical to ``UNet`` except that after a decoder stage a ``Self_Attn``
    layer is applied when the feature map's spatial size is 16, 32 or 64
    pixels, and the final 1x1 conv keeps its bias. The attention channel
    widths (512/256/128) are hard-coded — NOTE(review): they only match the
    default ch=64, nBlock=5 configuration with 256-px inputs (decoder sizes
    32/64/128/256); other configurations/sizes would mismatch — TODO confirm
    before reusing.
    """
    def __init__(self, in_ch=3, out_ch=1, nConv=2, nBlock=5, ch=64, kernel_size=3, stride=1, padding=1, **kwargs):
        super(UNet_attn, self).__init__()
        self.nBlock = nBlock
        self.out_ch = out_ch

        # Per-stage channel tables for encoder + decoder (rows shown below:
        # per-stage input channels, output channels, spatial size).
        in_chs = [in_ch] + [ch*2**n for n in range(nBlock-1)] + [ch*2**n for n in range(nBlock-1,0,-1)] + [ch]
        out_chs = [ch*2**n for n in range(nBlock-1)] + [ch*2**n for n in range(nBlock-2,-1,-1)] + [ch, out_ch]
        # [3,  64,  128, 256, 512, 1024, 512, 256, 128, 64]
        # [64, 128, 256, 512, 512, 256,  128, 64,  64,  10]
        # 256  128  64   32   16   32    64   128  256  256

        # Encoder stages (first one without pooling) and decoder stages.
        self.downBlocks, self.upBlocks = [], []
        for i in range(nBlock):
            self.downBlocks += [DownBlock(in_chs[i], out_chs[i], is_first_block=i==0, nConv=nConv, kernel_size=kernel_size, stride=stride, padding=padding)]
        for i in range(nBlock-1):
            self.upBlocks += [UpBlock(in_chs[i+nBlock], out_chs[i+nBlock], nConv=nConv, kernel_size=kernel_size, stride=stride, padding=padding)]
        
        self.downBlocks = nn.ModuleList(self.downBlocks)
        self.upBlocks = nn.ModuleList(self.upBlocks)
        # 1x1 output projection (unlike UNet, this one keeps its bias).
        self.last_conv = nn.Conv2d(in_chs[-1], out_chs[-1], 1)

        # initialise weights
        # for m in self.modules():
        #     if isinstance(m, nn.Conv2d):
        #         init_weights(m, init_type='kaiming')
        #     elif isinstance(m, nn.BatchNorm2d):
        #         init_weights(m, init_type='kaiming')

        # Self-attention layers keyed by decoder spatial size; the channel
        # counts assume the default ch=64, nBlock=5 configuration.
        self.attn16 = Self_Attn(512, 'relu')
        self.attn32 = Self_Attn(256, 'relu')
        self.attn64 = Self_Attn(128, 'relu')

        # parameters count
        self.param_count = 0
        for p in self.parameters():
            self.param_count += p.numel()
        print(f'U-Net param_count: {self.param_count}')

    def forward(self, x):
        # Encoder: store every stage output except the bottleneck as a skip.
        x_skip = []
        i = 0
        for downBlock in self.downBlocks:
            x = downBlock(x)
            if i<self.nBlock-1:
                x_skip.append(x)
            i += 1

        # # 16
        # x = self.attn16(x)[0]

        # # 32
        # x = self.upBlocks[0](x, x_skip.pop())
        # x = self.attn32(x)[0]

        # Decoder: consume skips in reverse order; apply self-attention at
        # the 16/32/64-pixel stages (Self_Attn returns (out, attention)).
        for upBlock in self.upBlocks:
            x = upBlock(x, x_skip.pop())
            if x.shape[2] == 16:
                x = self.attn16(x)[0]
            if x.shape[2] == 32:
                x = self.attn32(x)[0]
            if x.shape[2] == 64:
                x = self.attn64(x)[0]
            
        return self.last_conv(x)


class SegNet_S(nn.Module):
    """Small dilated-convolution segmentation head.

    A stack of Conv2d + LeakyReLU layers mapping ``repre_ch`` input channels
    to ``mask_ch`` output channels. Each padding matches its kernel/dilation
    so the spatial size is preserved.

    Args:
        repre_ch: number of input (representation) channels.
        mask_ch: number of output mask channels.
        size: 'S' for the 5-layer head, 'L' for the 9-layer head.

    Raises:
        ValueError: if ``size`` is neither 'S' nor 'L'.
    """

    def __init__(self, repre_ch, mask_ch, size='S'):
        super(SegNet_S, self).__init__()

        # NOTE(review): kept for external reference; not used in this class.
        self.channels_of_layer = {4:512, 8:512, 16:512, 32:512, 64:256, 128:128, 256:64}

        if size == 'S':
            out_chs =   [128, 64, 64, 32, mask_ch]
            dilations = [1, 2, 1, 2, 1]
            paddings =  [0, 2, 1, 2, 1]
            kernels =   [1, 3, 3, 3, 3]
        elif size == 'L':
            out_chs =   [128, 64, 64, 64, 64, 64, 64, 32, mask_ch]
            dilations = [1, 2, 4, 8, 1, 2, 4, 8, 1]
            paddings =  [0, 2, 4, 8, 1, 2, 4, 8, 1]
            kernels =   [1, 3, 3, 3, 3, 3, 3, 3, 3]
        else:
            # An unknown size previously fell through to a confusing
            # NameError below; fail fast with a clear message instead.
            raise ValueError(f"size must be 'S' or 'L', got {size!r}")

        in_ch = repre_ch
        self.layers = nn.ModuleList()
        for out_ch, kernel, dilation, padding in zip(out_chs, kernels, dilations, paddings):
            self.layers.append(
                nn.Conv2d(
                    in_ch,
                    out_ch,
                    kernel_size=kernel,
                    dilation=dilation,
                    padding=padding
                )
            )
            self.layers.append(nn.LeakyReLU(inplace=True))
            in_ch = out_ch

        # parameters count
        self.param_count = sum(p.numel() for p in self.parameters())
        print(f'SegNet param_count: {self.param_count}')

    def forward(self, input):
        # Apply every layer except the very last one (the trailing
        # LeakyReLU): the output mask logits are deliberately un-activated.
        for l in self.layers[:-1]:
            input = l(input)
        return input


# =======================================================================
import math


class Conv2dStaticSamePadding(nn.Module):
    """
    created by Zylo117
    The real keras/tensorflow conv2d with same padding.

    Pads the input asymmetrically (the extra pixel goes to the right/bottom)
    so the output spatial size equals ceil(input / stride), matching
    TensorFlow's 'SAME' convolution.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, bias=True, groups=1, dilation=1, **kwargs):
        super().__init__()
        # BUGFIX: `dilation` used to be accepted but silently dropped; it is
        # now forwarded to the underlying conv and honoured in the padding.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride,
                              bias=bias, groups=groups, dilation=dilation)
        self.stride = self.conv.stride
        self.kernel_size = self.conv.kernel_size
        self.dilation = self.conv.dilation

        if isinstance(self.stride, int):
            self.stride = [self.stride] * 2
        elif len(self.stride) == 1:
            self.stride = [self.stride[0]] * 2

        if isinstance(self.kernel_size, int):
            self.kernel_size = [self.kernel_size] * 2
        elif len(self.kernel_size) == 1:
            self.kernel_size = [self.kernel_size[0]] * 2

        if isinstance(self.dilation, int):
            self.dilation = [self.dilation] * 2
        elif len(self.dilation) == 1:
            self.dilation = [self.dilation[0]] * 2

    def forward(self, x):
        h, w = x.shape[-2:]

        # Effective kernel extent once dilation is applied.
        k_h = (self.kernel_size[0] - 1) * self.dilation[0] + 1
        k_w = (self.kernel_size[1] - 1) * self.dilation[1] + 1

        # TF 'SAME': total padding so that out = ceil(in / stride); clamped
        # at 0 because stride > kernel would otherwise yield negative pads.
        extra_h = max((math.ceil(w / self.stride[1]) - 1) * self.stride[1] - w + k_w, 0)
        extra_v = max((math.ceil(h / self.stride[0]) - 1) * self.stride[0] - h + k_h, 0)

        left = extra_h // 2
        right = extra_h - left
        top = extra_v // 2
        bottom = extra_v - top

        x = F.pad(x, [left, right, top, bottom])

        x = self.conv(x)
        return x


class MaxPool2dStaticSamePadding(nn.Module):
    """
    created by Zylo117
    The real keras/tensorflow MaxPool2d with same padding.

    Zero-pads the input so the pooled output size equals ceil(input/stride),
    mirroring TensorFlow's 'SAME' pooling.
    NOTE(review): padding uses zeros, so for all-negative inputs a padded
    window's max can be pulled up to 0 — TODO confirm this is acceptable.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.pool = nn.MaxPool2d(*args, **kwargs)

        def _as_pair(v):
            # Normalise int / 1-tuple configurations to an [h, w] pair.
            if isinstance(v, int):
                return [v] * 2
            if len(v) == 1:
                return [v[0]] * 2
            return v

        self.stride = _as_pair(self.pool.stride)
        self.kernel_size = _as_pair(self.pool.kernel_size)

    def forward(self, x):
        h, w = x.shape[-2:]

        # Total padding per axis needed for TF-style 'SAME' output size.
        extra_h = (math.ceil(w / self.stride[1]) - 1) * self.stride[1] - w + self.kernel_size[1]
        extra_v = (math.ceil(h / self.stride[0]) - 1) * self.stride[0] - h + self.kernel_size[0]

        # Split asymmetrically: the extra pixel goes to the right/bottom.
        left, top = extra_h // 2, extra_v // 2
        x = F.pad(x, [left, extra_h - left, top, extra_v - top])
        return self.pool(x)


class SwishImplementation(torch.autograd.Function):
    """Memory-efficient Swish: saves only the raw input for backward instead
    of keeping the intermediate sigmoid alive in the autograd graph."""

    @staticmethod
    def forward(ctx, i):
        # swish(x) = x * sigmoid(x)
        result = i * torch.sigmoid(i)
        ctx.save_for_backward(i)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        # BUGFIX: ctx.saved_variables was deprecated long ago and has been
        # removed from modern PyTorch; ctx.saved_tensors is the supported
        # accessor for tensors stored via save_for_backward.
        i, = ctx.saved_tensors
        sigmoid_i = torch.sigmoid(i)
        # d/dx [x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x)))
        return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))


class MemoryEfficientSwish(nn.Module):
    # Swish activation backed by the custom autograd Function above; uses
    # less memory during training than the plain `x * sigmoid(x)` expression
    # (not ONNX-exportable — use `Swish` for export).
    def forward(self, x):
        return SwishImplementation.apply(x)


class Swish(nn.Module):
    """Plain Swish activation, x * sigmoid(x) (ONNX-exportable form)."""

    def forward(self, x):
        return torch.sigmoid(x) * x

class SeparableConvBlock(nn.Module):
    """
    created by Zylo117

    Depthwise-separable conv with TF 'SAME' padding: a per-channel 3x3
    depthwise conv (no bias) followed by a 1x1 pointwise conv (with bias),
    optionally followed by BatchNorm and Swish.
    """

    def __init__(self, in_channels, out_channels=None, norm=True, activation=False, onnx_export=False):
        super(SeparableConvBlock, self).__init__()
        out_channels = in_channels if out_channels is None else out_channels

        # Only the pointwise conv carries a bias; the depthwise conv is
        # bias-free (matching the reference TF implementation).
        self.depthwise_conv = Conv2dStaticSamePadding(in_channels, in_channels,
                                                      kernel_size=3, stride=1, groups=in_channels, bias=False)
        self.pointwise_conv = Conv2dStaticSamePadding(in_channels, out_channels, kernel_size=1, stride=1)

        self.norm = norm
        if norm:
            # tf momentum 0.99 corresponds to pytorch momentum 0.01.
            self.bn = nn.BatchNorm2d(num_features=out_channels, momentum=0.01, eps=1e-3)

        self.activation = activation
        if activation:
            self.swish = Swish() if onnx_export else MemoryEfficientSwish()

    def forward(self, x):
        x = self.pointwise_conv(self.depthwise_conv(x))
        if self.norm:
            x = self.bn(x)
        if self.activation:
            x = self.swish(x)
        return x


class BiFPNBlock(nn.Module):
    """One bidirectional FPN layer (EfficientDet-style).

    Takes a list of ``height`` feature maps ordered coarse -> fine (x[0] is
    the smallest resolution) and fuses them with a top-down (upsampling)
    pass followed by a bottom-up (downsampling) pass, using fast-normalised
    fusion weights that are learnable when ``attention`` is True.

    When ``last_block`` is True only the top-down pass runs and the finest
    fused map — projected to ``out_ch`` channels — is returned; otherwise a
    list of ``height`` fused maps (coarse -> fine) is returned.
    """

    def __init__(self, num_channels=64, height=7, epsilon=1e-4, onnx_export=False, attention=True, last_block=False, norm=True, self_attention=False, out_ch=17):
        super(BiFPNBlock, self).__init__()
        assert height > 2
        self.height = height
        self.epsilon = epsilon
        self.last_block = last_block
        # NOTE(review): `self_attention` is currently unused (the Self_Attn
        # hook was dead code); the parameter is kept for compatibility.

        # Top-down pass: one conv per fusion node; the final node of the
        # last block projects to out_ch channels.
        self.upsampleConvBlocks = nn.ModuleList()
        for _ in range(height-2):
            self.upsampleConvBlocks += [SeparableConvBlock(num_channels, onnx_export=onnx_export, norm=norm)]
        if last_block:
            self.upsampleConvBlocks += [SeparableConvBlock(num_channels, out_ch, onnx_export=onnx_export, norm=norm)]
        else:
            self.upsampleConvBlocks += [SeparableConvBlock(num_channels, onnx_export=onnx_export, norm=norm)]

        if attention:
            self.upsampleWeights = nn.Parameter(torch.ones([height-1, 2], dtype=torch.float32), requires_grad=True)
        else:
            # BUGFIX: a plain tensor attribute does not follow
            # .to(device)/.cuda(); register it as a non-persistent buffer
            # (kept out of state_dict, matching the previous behaviour).
            self.register_buffer('upsampleWeights', torch.ones([height-1, 2], dtype=torch.float32), persistent=False)

        if not last_block:
            self.downsampleConvBlocks = nn.ModuleList()
            for _ in range(height-1):
                self.downsampleConvBlocks += [SeparableConvBlock(num_channels, onnx_export=onnx_export, norm=norm)]

            if attention:
                self.downsampleWeights = nn.Parameter(torch.ones([height-1, 3], dtype=torch.float32), requires_grad=True)
            else:
                self.register_buffer('downsampleWeights', torch.ones([height-1, 3], dtype=torch.float32), persistent=False)

        self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.downsample = MaxPool2dStaticSamePadding(3, 2)

        self.swish = MemoryEfficientSwish() if not onnx_export else Swish()

    def _fusion_weight(self, weights):
        # Fast normalised fusion: relu, then normalise so the weights sum
        # to ~1 (epsilon avoids division by zero).
        w = F.relu(weights)
        return w / (torch.sum(w) + self.epsilon)

    def forward(self, x):
        # ---- top-down (upsampling) pass ----
        x_mid = [x[0]]
        for h in range(1, self.height):
            weight = self._fusion_weight(self.upsampleWeights[h-1])
            x_mid += [
                self.upsampleConvBlocks[h-1](self.swish(
                    weight[0] * x[h] +
                    weight[1] * self.upsample(x_mid[-1])
                ))
            ]

        if self.last_block:
            return x_mid[-1]

        # ---- bottom-up (downsampling) pass ----
        x_out = [x_mid[-1]]
        for h in range(self.height-2, 0, -1):
            weight = self._fusion_weight(self.downsampleWeights[h])
            x_out += [
                self.downsampleConvBlocks[h](self.swish(
                    weight[0] * x[h] +
                    weight[1] * x_mid[h] +
                    weight[2] * self.downsample(x_out[-1])
                ))
            ]

        # Coarsest node has no intermediate feature: fuse raw input with the
        # downsampled running output only.
        weight = self._fusion_weight(self.downsampleWeights[0])
        x_out += [
            self.downsampleConvBlocks[0](self.swish(
                weight[0] * x[0] +
                weight[1] * self.downsample(x_out[-1])
            ))
        ]

        # Return in coarse -> fine order to match the input layout.
        return x_out[::-1]

# [512, 512, 512, 512, 256, 128, 64]*64 = 159744
# 5.5blocks * 12convs * (64*3*3+64*64*1*1) = 308352
# 64*3*1*1 = 192

class BiFPN(nn.Module):
    """Stack of BiFPN blocks over a multi-scale feature pyramid.

    Each input level is first projected to ``num_channels`` via a 1x1 conv
    + BatchNorm, then passed through ``n_block`` fusion blocks and a final
    top-down-only block that emits ``out_ch`` channels at the finest level.
    """

    def __init__(
        self,
        num_channels=64,
        out_ch=1,
        height=7,
        n_block=3,
        epsilon=1e-4,
        onnx_export=False,
        attention=True,
        chns=[512, 512, 512, 512, 256, 128, 64],
        self_attention=False
    ):
        super(BiFPN, self).__init__()
        assert len(chns) == height
        self.height = height
        self.n_block = n_block

        # Map each pyramid level's spatial key (4, 8, ..., 256) to its
        # incoming channel count. (chns default is read-only, never mutated.)
        channels_of_layer = {2**(i+2+(7-height)): c for i, c in enumerate(chns)}
        print(channels_of_layer)

        # 1x1 projections bringing every level to the shared channel width.
        self.first_convs = nn.ModuleList([
            nn.Sequential(
                Conv2dStaticSamePadding(channels_of_layer[2**(h+2)], num_channels, 1),
                nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
            )
            for h in range(7-height, 7)
        ])

        self.BiFPN_blocks = nn.ModuleList([
            BiFPNBlock(num_channels, height, epsilon=epsilon, onnx_export=onnx_export,
                       attention=attention, self_attention=self_attention)
            for _ in range(n_block)
        ])

        self.last_block = nn.Sequential(
            BiFPNBlock(num_channels, height, epsilon=epsilon, onnx_export=onnx_export,
                       attention=attention, last_block=True, out_ch=out_ch),
        )

        self.param_count = sum(p.numel() for p in self.parameters())
        print(f'BiFPN param_count: {self.param_count}')

    def forward(self, input):
        # Project every level to num_channels.
        # NOTE: this mutates the caller's list in place (as before).
        for h in range(self.height):
            input[h] = self.first_convs[h](input[h])

        # Run the fusion stack, then the final (top-down only) block.
        for n in range(self.n_block):
            input = self.BiFPN_blocks[n](input)

        return self.last_block(input)

if __name__ == '__main__':
    # Smoke test: construct a 1-channel-input, 17-class U-Net (the
    # constructor prints the parameter count as a side effect).
    # x = torch.randn(1,12,256,256)
    # net = SegNet_S(12)
    # x = net(x)
    # print(x.shape)
    # a = SegNet_S(2496, 17, 'L')
    # b = BiFPN(out_ch=17, n_block=7)
    UNet(1, out_ch=17)