# -*- coding: utf-8 -*-
# @Time    : 2023/7/12 10:20
# @Author  : Pan
# @Software: PyCharm
# @Project : VisualFramework
# @FileName: UNetPan_v1.py


import os
import paddle
import paddle.nn as nn
import paddle.nn.functional as F


def SyncBatchNorm(*args, **kwargs):
    """Return a batch-norm layer appropriate for the current runtime.

    ``nn.SyncBatchNorm`` has no kernel on CPU/XPU/NPU, so on those devices,
    during export (``PADDLESEG_EXPORT_STAGE`` set), or when running on a
    single card, fall back to ``nn.BatchNorm2D``. All arguments are forwarded
    unchanged to the chosen layer.
    """
    device = paddle.get_device()  # query once instead of three times
    use_plain_bn = (
        device == 'cpu'
        or bool(os.environ.get('PADDLESEG_EXPORT_STAGE'))
        or 'xpu' in device
        or 'npu' in device
        # single-card training: nothing to synchronize across
        or paddle.distributed.ParallelEnv().nranks == 1)
    if use_plain_bn:
        return nn.BatchNorm2D(*args, **kwargs)
    return nn.SyncBatchNorm(*args, **kwargs)


class DwConv2D(nn.Layer):
    """Expand-transform-project convolution block.

    A 1x1 conv expands the channels by a factor ``s``, a (possibly grouped)
    ``kernel_size`` conv transforms them, and a final 1x1 conv projects down
    to ``out_channels``. Each of the first two stages is followed by a
    batch norm (via :func:`SyncBatchNorm`).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, s=4, group_size=1):
        super(DwConv2D, self).__init__()
        expanded = int(out_channels * s)  # width of the expanded middle stage
        self.up_conv = nn.Sequential(
            nn.Conv2D(in_channels, expanded, 1, 1, 0),
            SyncBatchNorm(expanded),
        )
        self.mid_conv = nn.Sequential(
            nn.Conv2D(expanded, expanded, kernel_size, stride, padding, groups=group_size),
            SyncBatchNorm(expanded),
        )
        self.down_conv = nn.Conv2D(expanded, out_channels, 1, 1, 0)

    def forward(self, x):
        out = self.up_conv(x)
        out = self.mid_conv(out)
        return self.down_conv(out)


class Activation(nn.Layer):
    """
    The wrapper of activations.

    Args:
        act (str, optional): The activation name in lowercase. It must be one of ['elu', 'gelu',
            'hardshrink', 'tanh', 'hardtanh', 'prelu', 'relu', 'relu6', 'selu', 'leakyrelu', 'sigmoid',
            'softmax', 'softplus', 'softshrink', 'softsign', 'tanhshrink', 'logsigmoid', 'logsoftmax',
            'hsigmoid']. Default: None, means identical transformation.

    Returns:
        A callable object of Activation.

    Raises:
        KeyError: When parameter `act` is not in the optional range.

    Examples:

        from paddleseg.models.common.activation import Activation

        relu = Activation("relu")
        print(relu)
        # <class 'paddle.nn.layer.activation.ReLU'>

        sigmoid = Activation("sigmoid")
        print(sigmoid)
        # <class 'paddle.nn.layer.activation.Sigmoid'>

        not_exit_one = Activation("not_exit_one")
        # KeyError: "not_exit_one does not exist in the current dict_keys(['elu', 'gelu', 'hardshrink',
        # 'tanh', 'hardtanh', 'prelu', 'relu', 'relu6', 'selu', 'leakyrelu', 'sigmoid', 'softmax',
        # 'softplus', 'softshrink', 'softsign', 'tanhshrink', 'logsigmoid', 'logsoftmax', 'hsigmoid'])"
    """

    def __init__(self, act=None):
        super(Activation, self).__init__()

        self._act = act
        # Map lowercase names to the class names exported by paddle's
        # activation module, e.g. 'relu' -> 'ReLU'.
        upper_act_names = nn.layer.activation.__dict__.keys()
        lower_act_names = [name.lower() for name in upper_act_names]
        act_dict = dict(zip(lower_act_names, upper_act_names))

        if act is not None:
            if act in act_dict:
                # getattr instead of eval(): same lookup, no string execution.
                self.act_func = getattr(nn.layer.activation, act_dict[act])()
            else:
                raise KeyError("{} does not exist in the current {}".format(
                    act, act_dict.keys()))

    def forward(self, x):
        # act=None means identity: pass the input through untouched.
        if self._act is not None:
            return self.act_func(x)
        else:
            return x


class UpSampling(nn.Layer):
    """Upsample a deep feature map by 2x, concatenate skip features, and fuse.

    The high-level feature is upsampled either by a transposed conv
    (``use_deconv=True``) or by bilinear resize followed by a 1x1 conv.
    The result is channel-concatenated with ``n_cat - 1`` skip features and
    passed through a :class:`DoubleConv`.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 n_cat,
                 use_deconv=False,
                 align_corners=False):
        super(UpSampling, self).__init__()
        if use_deconv:
            self.up = nn.Conv2DTranspose(
                in_channels, out_channels, kernel_size=2, stride=2, padding=0)
        else:
            self.up = nn.Sequential(
                nn.Upsample(
                    scale_factor=2,
                    mode='bilinear',
                    align_corners=align_corners),
                nn.Conv2D(in_channels, out_channels, 1, 1, 0))

        self.conv = DoubleConv(n_cat * out_channels, out_channels)

    def forward(self, high_feature, *low_features):
        # Upsampled deep feature first, then the skip connections, all
        # stacked along the channel axis.
        features = [self.up(high_feature), *low_features]
        return self.conv(paddle.concat(features, axis=1))


class DownSampling(nn.Layer):
    """Spatial downsampling block selected by name.

    Args:
        func (str): Downsampling strategy, one of 'max_pool', 'avg_pool',
            'conv' (strided conv + batch norm) or 'attention'
            (:class:`Attention` + batch norm).
        in_channels (int): Input channel count (unused for pooling modes).
        out_channels (int): Output channel count (unused for pooling modes).
        kernel_size (int, optional): Pool/conv kernel size. Default: 2.
        stride (int, optional): Pool/conv stride. Default: 2.
        padding (int, optional): Conv padding ('conv' mode only). Default: 2.
        shape (tuple, optional): Pooled token-grid size forwarded to
            Attention ('attention' mode only). Default: (64, 64).

    Raises:
        ValueError: If ``func`` is not one of the supported names.
    """

    def __init__(self, func, in_channels, out_channels, kernel_size=2, stride=2, padding=2, shape=(64, 64)):
        super(DownSampling, self).__init__()
        if func == "max_pool":
            self.func = nn.MaxPool2D(kernel_size, stride)
        elif func == "avg_pool":
            self.func = nn.AvgPool2D(kernel_size, stride)
        elif func == "conv":
            self.func = nn.Sequential(
                            nn.Conv2D(in_channels, out_channels, kernel_size, stride, padding=padding),
                            SyncBatchNorm(out_channels)
                        )
        elif func == "attention":
            self.func = nn.Sequential(
                Attention(in_channels, out_channels, stride=stride, out_size=shape),
                SyncBatchNorm(out_channels)
            )
        else:
            # Fail fast here instead of the confusing AttributeError that
            # forward() would otherwise raise on the missing self.func.
            raise ValueError(
                "func must be one of 'max_pool', 'avg_pool', 'conv', "
                "'attention', got {!r}".format(func))

    def forward(self, x):
        return self.func(x)


class Attention(nn.Layer):
    """Multi-head self-attention over a pooled grid, fused with a strided conv branch.

    The input is adaptively average-pooled to ``out_size``, the pooled cells are
    treated as tokens for multi-head self-attention, the attended result is
    bilinearly resized to the strided output resolution, and added to a
    :class:`DwConv2D` downsampling of the original input.

    Args:
        dim (int): Number of input channels.
        channels (int): Number of output channels. Presumably must be divisible
            by ``num_heads`` (head_dim = channels // num_heads) — confirm.
        stride (int): Spatial downsampling factor applied by the conv branch and
            matched by the interpolation of the attention branch.
        out_size (tuple, optional): Pooled grid size (tokens = H*W of this grid).
        num_heads (int, optional): Number of attention heads. Default: 8.
        qkv_bias (bool, optional): Whether the qkv projection has a bias.
        qk_scale (float, optional): Override for the attention scale; defaults
            to head_dim ** -0.5.
        attn_drop (float, optional): Dropout rate on attention weights.
        proj_drop (float, optional): Dropout rate after the output projection.
        align_corners (bool, optional): Passed to F.interpolate.
    """

    def __init__(self,
                 dim,
                 channels,
                 stride,
                 out_size=(32, 32),
                 num_heads=8,
                 qkv_bias=False,
                 qk_scale=None,
                 attn_drop=0.,
                 proj_drop=0.,
                 align_corners=False):
        super().__init__()
        self.num_heads = num_heads
        self.channels = channels
        self.out_size = out_size
        head_dim = channels // num_heads
        # Standard attention temperature 1/sqrt(head_dim) unless overridden.
        self.scale = qk_scale or head_dim**-0.5
        self.adapt_pool = nn.AdaptiveAvgPool2D(out_size)
        # Single projection producing q, k and v stacked along the last axis.
        self.qkv = nn.Linear(dim, channels * 3, bias_attr=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(channels, channels)
        self.proj_drop = nn.Dropout(proj_drop)
        self.stride = stride
        self.align_corners = align_corners

        # Conv branch: strided DwConv2D + BN maps (N, dim, H, W) to the same
        # resolution the attention branch is interpolated to.
        self.down_x = nn.Sequential(
            DwConv2D(dim, channels, 3, stride, padding=1, group_size=int(4 * channels)),
            SyncBatchNorm(channels)
        )

    def forward(self, x):
        # x is assumed NCHW; shape = [N, C, H, W] as a tensor — TODO confirm.
        shape = paddle.shape(x)

        # Pool to a fixed out_size grid, then flatten to tokens:
        # NCHW -> NHWC -> (N, H'*W', C).
        y = self.adapt_pool(x)
        y = y.transpose([0, 2, 3, 1]).reshape([shape[0], self.out_size[0]*self.out_size[1], shape[1]])
        x_shape = paddle.shape(y)
        N, C = x_shape[1], self.channels
        # Project to qkv and split heads: (3, batch, heads, tokens, head_dim).
        qkv = self.qkv(y).reshape((-1, N, 3, self.num_heads, C //
                                   self.num_heads)).transpose((2, 0, 3, 1, 4))
        q, k, v = qkv[0], qkv[1], qkv[2]

        # Scaled dot-product attention over the pooled tokens.
        attn = (q.matmul(k.transpose((0, 1, 3, 2)))) * self.scale
        attn = nn.functional.softmax(attn, axis=-1)
        attn = self.attn_drop(attn)

        # Merge heads back: (batch, tokens, channels).
        y = (attn.matmul(v)).transpose((0, 2, 1, 3)).reshape((-1, N, C))
        y = self.proj(y)
        y = self.proj_drop(y)
        # Tokens back to an image grid, NHWC -> NCHW.
        y = y.reshape([shape[0], self.out_size[0], self.out_size[1], self.channels]).transpose([0, 3, 1, 2])
        # Resize the attention map to the strided output resolution.
        y = F.interpolate(
                y,
                shape[2:] // self.stride,
                align_corners=self.align_corners)
        # Residual-style fusion of the conv branch and the attention branch.
        return self.down_x(x) + y


class DoubleConv_(nn.Layer):
    """Two DwConv2D + BN + ReLU stages applied back to back.

    Variant of :class:`DoubleConv` that uses :class:`DwConv2D` (with
    ``s=4`` and ``group_size = 4 * out_channels``) in place of plain
    ``nn.Conv2D`` layers.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 filter_size=3,
                 stride=1,
                 padding=1):
        super(DoubleConv_, self).__init__()
        stages = []
        for ch_in in (in_channels, out_channels):
            stages.extend([
                DwConv2D(ch_in, out_channels, filter_size, stride, padding, 4, int(4 * out_channels)),
                SyncBatchNorm(out_channels),
                nn.ReLU(),
            ])
        self.conv = nn.Sequential(*stages)

    def forward(self, inputs):
        return self.conv(inputs)


class DoubleConv(nn.Layer):
    """Two Conv2D + BN + ReLU stages applied back to back (classic UNet block)."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 filter_size=3,
                 stride=1,
                 padding=1):
        super(DoubleConv, self).__init__()
        stages = []
        for ch_in in (in_channels, out_channels):
            stages.extend([
                nn.Conv2D(ch_in, out_channels, filter_size, stride, padding),
                SyncBatchNorm(out_channels),
                nn.ReLU(),
            ])
        self.conv = nn.Sequential(*stages)

    def forward(self, inputs):
        return self.conv(inputs)


class UNetPanV1(nn.Layer):
    """
    The UNet++ implementation based on PaddlePaddle.

    The original article refers to
    Zongwei Zhou, et, al. "UNet++: A Nested U-Net Architecture for Medical Image Segmentation"
    (https://arxiv.org/abs/1807.10165).

    Configuration keys (all optional, read from the ``configs`` dict):
        num_classes (int): The unique number of target classes. Default: 3.
        in_channels (int): The channel number of input image. Default: 3.
        use_deconv (bool): Whether to use deconvolution in upsampling.
            If False, use resize_bilinear. Default: False.
        down_sample (str): Downsampling strategy passed to DownSampling
            ('max_pool', 'avg_pool', 'conv', 'attention'). Default: 'conv'.
        align_corners (bool): An argument of F.interpolate. It should be set
            to False when the output size of feature is even, e.g. 1024x512,
            otherwise it is True, e.g. 769x769. Default: False.
        is_ds (bool): Use deep supervision (return the average of the four
            nested decoder heads) or not (return only the deepest head).
            Default: True.
        channels (list[int]): Encoder channel widths per level.
            Default: [32, 64, 128, 256, 512].
    """

    def __init__(self, configs):
        super(UNetPanV1, self).__init__()
        # dict.get replaces the verbose `x if "x" in configs.keys() else d`
        # pattern; semantics are identical.
        num_classes = configs.get("num_classes", 3)
        in_channels = configs.get("in_channels", 3)
        use_deconv = configs.get("use_deconv", False)
        down_sample = configs.get("down_sample", "conv")
        align_corners = configs.get("align_corners", False)
        is_ds = configs.get("is_ds", True)
        channels = configs.get("channels", [32, 64, 128, 256, 512])
        self.is_ds = is_ds

        # Encoder: DoubleConv at each level, downsampling in between.
        self.conv0_0 = DoubleConv(in_channels, channels[0])
        self.pool_0 = DownSampling(down_sample, channels[0], channels[0], 2, 2, 0, (32, 32))
        self.conv1_0 = DoubleConv(channels[0], channels[1])
        self.pool_1 = DownSampling(down_sample, channels[1], channels[1], 2, 2, 0, (16, 16))
        self.conv2_0 = DoubleConv(channels[1], channels[2])
        self.pool_2 = DownSampling(down_sample, channels[2], channels[2], 2, 2, 0, (8, 8))
        self.conv3_0 = DoubleConv(channels[2], channels[3])
        self.pool_3 = DownSampling(down_sample, channels[3], channels[3], 2, 2, 0, (8, 8))
        self.conv4_0 = DoubleConv(channels[3], channels[4])

        # Nested decoder: up_catI_J fuses level I at nesting depth J with
        # J skip features (n_cat = J + 1 inputs concatenated).
        self.up_cat0_1 = UpSampling(
            channels[1],
            channels[0],
            n_cat=2,
            use_deconv=use_deconv,
            align_corners=align_corners)
        self.up_cat1_1 = UpSampling(
            channels[2],
            channels[1],
            n_cat=2,
            use_deconv=use_deconv,
            align_corners=align_corners)
        self.up_cat2_1 = UpSampling(
            channels[3],
            channels[2],
            n_cat=2,
            use_deconv=use_deconv,
            align_corners=align_corners)
        self.up_cat3_1 = UpSampling(
            channels[4],
            channels[3],
            n_cat=2,
            use_deconv=use_deconv,
            align_corners=align_corners)

        self.up_cat0_2 = UpSampling(
            channels[1],
            channels[0],
            n_cat=3,
            use_deconv=use_deconv,
            align_corners=align_corners)
        self.up_cat1_2 = UpSampling(
            channels[2],
            channels[1],
            n_cat=3,
            use_deconv=use_deconv,
            align_corners=align_corners)
        self.up_cat2_2 = UpSampling(
            channels[3],
            channels[2],
            n_cat=3,
            use_deconv=use_deconv,
            align_corners=align_corners)

        self.up_cat0_3 = UpSampling(
            channels[1],
            channels[0],
            n_cat=4,
            use_deconv=use_deconv,
            align_corners=align_corners)
        self.up_cat1_3 = UpSampling(
            channels[2],
            channels[1],
            n_cat=4,
            use_deconv=use_deconv,
            align_corners=align_corners)

        self.up_cat0_4 = UpSampling(
            channels[1],
            channels[0],
            n_cat=5,
            use_deconv=use_deconv,
            align_corners=align_corners)

        # One 1x1 prediction head per nesting depth (deep supervision).
        self.out_1 = nn.Conv2D(channels[0], num_classes, 1, 1, 0)
        self.out_2 = nn.Conv2D(channels[0], num_classes, 1, 1, 0)
        self.out_3 = nn.Conv2D(channels[0], num_classes, 1, 1, 0)
        self.out_4 = nn.Conv2D(channels[0], num_classes, 1, 1, 0)

    def forward(self, inputs):
        # 0 down
        X0_0 = self.conv0_0(inputs)  # n,32,h,w
        pool_0 = self.pool_0(X0_0)  # n,32,h/2,w/2
        X1_0 = self.conv1_0(pool_0)  # n,64,h/2,w/2
        pool_1 = self.pool_1(X1_0)  # n,64,h/4,w/4
        X2_0 = self.conv2_0(pool_1)  # n,128,h/4,w/4
        pool_2 = self.pool_2(X2_0)  # n,128,h/8,n/8
        X3_0 = self.conv3_0(pool_2)  # n,256,h/8,w/8
        pool_3 = self.pool_3(X3_0)  # n,256,h/16,w/16
        X4_0 = self.conv4_0(pool_3)  # n,512,h/16,w/16

        # 1 up+concat
        X0_1 = self.up_cat0_1(X1_0, X0_0)  # n,32,h,w
        X1_1 = self.up_cat1_1(X2_0, X1_0)  # n,64,h/2,w/2
        X2_1 = self.up_cat2_1(X3_0, X2_0)  # n,128,h/4,w/4
        X3_1 = self.up_cat3_1(X4_0, X3_0)  # n,256,h/8,w/8

        # 2 up+concat
        X0_2 = self.up_cat0_2(X1_1, X0_0, X0_1)  # n,32,h,w
        X1_2 = self.up_cat1_2(X2_1, X1_0, X1_1)  # n,64,h/2,w/2
        X2_2 = self.up_cat2_2(X3_1, X2_0, X2_1)  # n,128,h/4,w/4

        # 3 up+concat
        X0_3 = self.up_cat0_3(X1_2, X0_0, X0_1, X0_2)  # n,32,h,w
        X1_3 = self.up_cat1_3(X2_2, X1_0, X1_1, X1_2)  # n,64,h/2,w/2

        # 4 up+concat
        X0_4 = self.up_cat0_4(X1_3, X0_0, X0_1, X0_2, X0_3)  # n,32,h,w

        # out conv1*1
        out_1 = self.out_1(X0_1)  # n,num_classes,h,w
        out_2 = self.out_2(X0_2)  # n,num_classes,h,w
        out_3 = self.out_3(X0_3)  # n,num_classes,h,w
        out_4 = self.out_4(X0_4)  # n,num_classes,h,w

        # Deep supervision: average all four heads; otherwise only the
        # deepest head is returned.
        output = (out_1 + out_2 + out_3 + out_4) / 4

        if self.is_ds:
            return [output]
        else:
            return [out_4]


if __name__ == "__main__":
    # Quick FLOPs sanity check with a reduced channel configuration.
    cfg = {"down_sample": "conv", "channels": [12, 24, 48, 96, 192]}
    net = UNetPanV1(cfg)
    # paddle.summary(net, (None, 3, 224, 224))
    paddle.flops(net, [1, 3, 512, 512])
