# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""NanoDet Based ShuffleNet-V2."""

import mindspore.common.dtype as mstype
import mindspore as ms
import mindspore.nn as nn
from mindspore import context, Tensor
from mindspore.context import ParallelMode
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.parallel._auto_parallel_context import auto_parallel_context
from mindspore.communication.management import get_group_size
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops import composite as C

class Integral(nn.Cell):
    """Integral distribution projection (from Generalized Focal Loss).

    Converts the discrete distribution predicted for each bbox side
    (``reg_max + 1`` logits per side) into a scalar offset via a
    softmax-weighted expectation over the bin indices ``0..reg_max``.

    Args:
        reg_max (int): Maximum bin index; each side is predicted with
            ``reg_max + 1`` logits. Default: 7 (8 bins, matching the
            original hard-coded behavior).
    """

    def __init__(self, reg_max=7):
        super(Integral, self).__init__()
        self.reg_max = reg_max
        self.softmax = P.Softmax(axis=-1)
        self.reshape = P.Reshape()
        self.shape = P.Shape()
        # Bin centers 0..reg_max used as expectation weights.
        self.linspace = Tensor([list(range(reg_max + 1))], mstype.float32)
        self.matmul = P.MatMul(transpose_b=True)

    def construct(self, x):
        """Project distributions ``x`` (..., 4 * (reg_max + 1)) to offsets (..., 4)."""
        x_shape = self.shape(x)
        x = self.reshape(x, (-1, self.reg_max + 1))
        x = self.softmax(x)
        # Expectation: softmax probabilities dotted with bin indices.
        x = self.matmul(x, self.linspace)
        out_shape = x_shape[:-1] + (4,)
        x = self.reshape(x, out_shape)
        return x


class Distance2bbox(nn.Cell):
    """Decode per-side distances back to (x1, y1, x2, y2) corner boxes."""

    def __init__(self):
        super(Distance2bbox, self).__init__()
        self.stack = P.Stack(-1)

    def construct(self, points, distance):
        """Return boxes built from anchor ``points`` and ``distance`` offsets."""
        left = distance[..., 0]
        top = distance[..., 1]
        right = distance[..., 2]
        bottom = distance[..., 3]
        x_min = points[..., 0] - left
        y_min = points[..., 1] - top
        x_max = points[..., 0] + right
        y_max = points[..., 1] + bottom
        return self.stack([x_min, y_min, x_max, y_max])


class BBox2Distance(nn.Cell):
    """Convert corner boxes to clipped per-side distances from anchor points.

    Args:
        max_dis (float): Upper clip bound for each distance. Default 6.9
            (presumably ``reg_max - 0.1`` so targets stay strictly inside
            the last DFL bin — confirm against the Integral's ``reg_max``).
    """

    def __init__(self, max_dis=6.9):
        super(BBox2Distance, self).__init__()
        self.stack = P.Stack(-1)
        # Hoisted out of construct: the clip-bound Tensors are created once
        # instead of on every forward pass.
        self.min_dis = Tensor(0.0)
        self.max_dis = Tensor(max_dis)

    def construct(self, points, bbox):
        """Return (..., 4) clipped distances (left, top, right, bottom)."""
        left = points[..., 0] - bbox[..., 0]
        top = points[..., 1] - bbox[..., 1]
        right = bbox[..., 2] - points[..., 0]
        bottom = bbox[..., 3] - points[..., 1]
        left = C.clip_by_value(left, self.min_dis, self.max_dis)
        top = C.clip_by_value(top, self.min_dis, self.max_dis)
        right = C.clip_by_value(right, self.min_dis, self.max_dis)
        bottom = C.clip_by_value(bottom, self.min_dis, self.max_dis)
        return self.stack((left, top, right, bottom))


class QualityFocalLoss(nn.Cell):
    """Quality focal loss (QFL).

    The one-hot class target is scaled by a per-sample quality ``score``
    and the BCE term is modulated by ``|target - sigmoid(logit)| ** beta``.
    """

    def __init__(self, beta=2.0, loss_weight=1.0):
        super(QualityFocalLoss, self).__init__()
        self.sigmoid = P.Sigmoid()
        self.bce_with_logits = nn.BCEWithLogitsLoss(reduction='none')
        self.pow = P.Pow()
        self.abs = P.Abs()
        self.onehot = P.OneHot()
        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.tile = P.Tile()
        self.expand_dims = P.ExpandDims()
        self.reduce_sum = P.ReduceSum()
        self.beta = beta
        self.loss_weight = loss_weight

    def construct(self, logits, label, score, avg_factor):
        """Return the QFL summed over all entries and divided by ``avg_factor``."""
        num_classes = F.shape(logits)[-1]
        pred_sigmoid = self.sigmoid(logits)
        # Soft targets: one-hot class labels scaled by the quality score.
        soft_label = self.onehot(label, num_classes, self.on_value, self.off_value)
        quality = self.tile(self.expand_dims(score, -1), (1, num_classes))
        soft_label = soft_label * quality
        bce = self.bce_with_logits(logits, soft_label)
        # Focal modulation by the absolute prediction error.
        modulation = self.pow(self.abs(soft_label - pred_sigmoid), self.beta)
        qfl = self.reduce_sum(bce * modulation) / avg_factor
        return self.loss_weight * qfl


class DistributionFocalLoss(nn.Cell):
    """Distribution focal loss (DFL).

    Cross-entropy against the two integer bins bracketing each continuous
    regression target, each weighted by the target's distance to the
    opposite bin (linear interpolation).
    """

    def __init__(self, loss_weight=1.0):
        super(DistributionFocalLoss, self).__init__()
        self.cross_entropy = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='none')
        self.cast = P.Cast()
        self.reduce_sum = P.ReduceSum()
        self.loss_weight = loss_weight
        self.reshape = P.Reshape()

    def construct(self, pred, label, weight, avg_factor):
        """Return the weighted DFL normalized by ``avg_factor``."""
        left_bin = self.cast(F.floor(label), mstype.int32)
        right_bin = left_bin + 1
        # Interpolation weights between the two neighbouring integer bins.
        left_weight = self.cast(right_bin, mstype.float32) - label
        right_weight = label - self.cast(left_bin, mstype.float32)
        per_sample = (self.cross_entropy(pred, left_bin) * left_weight
                      + self.cross_entropy(pred, right_bin) * right_weight)
        per_sample = self.reshape(per_sample, (-1,)) * weight
        dfl = self.reduce_sum(per_sample) / avg_factor
        return self.loss_weight * dfl


class GIou(nn.Cell):
    """Generalized-IoU loss: weighted sum of ``1 - GIoU`` over box pairs.

    Boxes are in (x1, y1, x2, y2) form. (Removed a duplicated ``Reshape``
    assignment and the unused ``Concat``/``ReduceMean`` operators from the
    original implementation.)
    """

    def __init__(self, loss_weight=1.0):
        super(GIou, self).__init__()
        self.reshape = P.Reshape()
        self.cast = P.Cast()
        self.min = P.Minimum()
        self.max = P.Maximum()
        self.div = P.RealDiv()
        # Guards against division by a degenerate union / enclosing area.
        self.eps = 0.000001
        self.loss_weight = loss_weight
        self.reduce_sum = P.ReduceSum()

    def construct(self, box_p, box_gt, weight, avg_factor):
        """Return the GIoU loss.

        Args:
            box_p: predicted boxes, (..., 4).
            box_gt: target boxes, (..., 4).
            weight: per-sample weights applied after flattening.
            avg_factor: normalization denominator.
        """
        box_p_area = (box_p[..., 2:3] - box_p[..., 0:1]) * (box_p[..., 3:4] - box_p[..., 1:2])
        box_gt_area = (box_gt[..., 2:3] - box_gt[..., 0:1]) * (box_gt[..., 3:4] - box_gt[..., 1:2])
        x_1 = self.max(box_p[..., 0:1], box_gt[..., 0:1])
        y_1 = self.max(box_p[..., 1:2], box_gt[..., 1:2])
        x_2 = self.min(box_p[..., 2:3], box_gt[..., 2:3])
        y_2 = self.min(box_p[..., 3:4], box_gt[..., 3:4])
        intersection = (y_2 - y_1) * (x_2 - x_1)
        # Smallest enclosing (hull) box of each pair.
        xc_1 = self.min(box_p[..., 0:1], box_gt[..., 0:1])
        xc_2 = self.max(box_p[..., 2:3], box_gt[..., 2:3])
        yc_1 = self.min(box_p[..., 1:2], box_gt[..., 1:2])
        yc_2 = self.max(box_p[..., 3:4], box_gt[..., 3:4])
        c_area = (xc_2 - xc_1) * (yc_2 - yc_1)
        union = box_p_area + box_gt_area - intersection + self.eps
        c_area = c_area + self.eps
        iou = self.div(self.cast(intersection, ms.float32), self.cast(union, ms.float32))
        # GIoU = IoU - (hull - union) / hull
        hull_gap = c_area - union
        penalty = self.div(self.cast(hull_gap, ms.float32), self.cast(c_area, ms.float32))
        giou = iou - penalty
        giou = C.clip_by_value(giou, -1.0, 1.0)
        giou = 1 - giou
        giou = self.reshape(giou, (-1,))
        giou = giou * weight
        giou = self.reduce_sum(giou) / avg_factor
        loss = self.loss_weight * giou
        return loss


class Iou(nn.Cell):
    """Elementwise IoU between boxes in (x1, y1, x2, y2) form."""

    def __init__(self):
        super(Iou, self).__init__()
        self.cast = P.Cast()
        self.min = P.Minimum()
        self.max = P.Maximum()
        self.div = P.RealDiv()
        self.eps = 0.000001

    def construct(self, box_p, box_gt):
        """Return IoU values clipped to [0, 1] with the last axis squeezed out."""
        area_p = (box_p[..., 2:3] - box_p[..., 0:1]) * (box_p[..., 3:4] - box_p[..., 1:2])
        area_gt = (box_gt[..., 2:3] - box_gt[..., 0:1]) * (box_gt[..., 3:4] - box_gt[..., 1:2])
        inter_x1 = self.max(box_p[..., 0:1], box_gt[..., 0:1])
        inter_x2 = self.min(box_p[..., 2:3], box_gt[..., 2:3])
        inter_y1 = self.max(box_p[..., 1:2], box_gt[..., 1:2])
        inter_y2 = self.min(box_p[..., 3:4], box_gt[..., 3:4])

        # Clamp to zero so disjoint boxes contribute no intersection.
        inter_w = self.max(inter_x2 - inter_x1, F.scalar_to_array(0.0))
        inter_h = self.max(inter_y2 - inter_y1, F.scalar_to_array(0.0))

        intersection = inter_w * inter_h
        union = area_p + area_gt - intersection + self.eps
        iou = self.div(self.cast(intersection, ms.float32), self.cast(union, ms.float32))
        iou = C.clip_by_value(iou, 0.0, 1.0)
        return iou.squeeze(-1)


class NanoDetWithLossCell(nn.Cell):
    """Training cell: runs NanoDet and sums QFL, GIoU and DFL losses.

    (Removed duplicated ``P.Ones``/``P.Sigmoid`` assignments and the unused
    ``mask_bbox``/``weight_targets_dfl`` locals from the original.)
    """

    def __init__(self, network, config):
        super(NanoDetWithLossCell, self).__init__()
        self.network = network
        self.cast = P.Cast()
        self.reduce_sum = P.ReduceSum()
        self.reduce_mean = P.ReduceMean()
        self.less = P.Less()
        self.tile = P.Tile()
        self.expand_dims = P.ExpandDims()
        self.zeros = P.Zeros()
        self.ones = P.Ones()
        self.reshape = P.Reshape()
        self.shape = P.Shape()
        self.sigmoid = P.Sigmoid()
        self.iou = Iou()
        self.loss_bbox = GIou(loss_weight=2.0)
        self.loss_qfl = QualityFocalLoss(loss_weight=1.0)
        self.loss_dfl = DistributionFocalLoss(loss_weight=0.25)
        self.integral = Integral()
        self.distance2bbox = Distance2bbox()
        self.bbox2distance = BBox2Distance()
        self.argmax = P.ArgMaxWithValue(axis=-1)
        self.max = P.Maximum()
        self.stack = P.Stack(-1)
        self.ones_like = P.OnesLike()
        self.loss_zero = Tensor(0.0, dtype=mstype.float32)

    def construct(self, x, res_boxes, res_labels, res_center_priors, nums_match):
        """Compute the total detection loss for one batch.

        Args:
            x: input images.
            res_boxes: per-prior assigned gt boxes, (batch, num_priors, 4).
            res_labels: per-prior class labels; the value 80 marks background.
            res_center_priors: per-prior data, (batch, num_priors, 4); index 2
                is used as the normalization stride.
            nums_match: number of matched (positive) priors per image.
        """
        bbox_preds, cls_scores = self.network(x)
        cls_scores = self.cast(cls_scores, mstype.float32)
        bbox_preds = self.cast(bbox_preds, mstype.float32)
        num_total_samples = self.reduce_sum(self.cast(nums_match, mstype.float32))
        losses_qfl = ()
        losses_bbox = ()
        losses_dfl = ()
        avg_factor = ()
        start_idx = 0
        # Per-level prior counts (presumably 40x40, 20x20 and 10x10 grids for a
        # 320x320 input -- confirm against config.feature_size).
        for level, per_level in enumerate([1600, 400, 100]):
            end_idx = start_idx + per_level
            cls_scores_temp = self.reshape(cls_scores[:, start_idx:end_idx, :], (-1, 80))
            bbox_preds_temp = self.reshape(bbox_preds[:, start_idx:end_idx, :], (-1, 32))
            res_boxes_temp = self.reshape(res_boxes[:, start_idx:end_idx, :], (-1, 4))
            res_labels_temp = self.reshape(res_labels[:, start_idx:end_idx], (-1,))
            res_center_priors_temp = self.reshape(res_center_priors[:, start_idx:end_idx, :], (-1, 4))

            # Positives are priors labeled with a real class (< 80).
            mask = self.cast(self.less(res_labels_temp, 80), mstype.float32)
            # Max predicted class confidence per prior, zeroed on negatives.
            weight_targets = self.argmax(self.sigmoid(cls_scores_temp))[1] * mask
            weight_ones = self.ones_like(weight_targets) * mask

            # Normalize priors and box targets by the stride stored at index 2.
            decode_grid_cell_centers = (res_center_priors_temp / self.tile(
                self.expand_dims(res_center_priors_temp[..., 2], -1), (1, 4)))
            bbox_pred_corners = self.integral(bbox_preds_temp)
            decode_bbox_pred = self.distance2bbox(decode_grid_cell_centers, bbox_pred_corners)
            decode_bbox_target = res_boxes_temp / self.tile(
                self.expand_dims(res_center_priors_temp[..., 2], -1), (1, 4))

            # GIoU loss on decoded boxes.
            loss_bbox = self.loss_bbox(decode_bbox_pred, decode_bbox_target, weight_ones, num_total_samples)
            losses_bbox += (loss_bbox,)

            # Distribution focal loss on the raw per-side distributions.
            pred_corners = self.reshape(bbox_preds_temp, (-1, 8))
            target_corners = self.reshape(self.bbox2distance(decode_grid_cell_centers, decode_bbox_target), (-1,))
            weight_dfl_ones = self.reshape(self.tile(self.expand_dims(weight_ones, -1), (1, 4)), (-1,))
            loss_dfl = self.loss_dfl(pred_corners, target_corners, weight_dfl_ones, num_total_samples)
            losses_dfl += (loss_dfl,)

            # Quality focal loss, supervised by the IoU of the decoded boxes.
            score = self.iou(decode_bbox_pred, decode_bbox_target) * mask
            loss_qlf = self.loss_qfl(cls_scores_temp, res_labels_temp, score, num_total_samples)
            losses_qfl += (loss_qlf,)

            avg_factor += (self.reduce_sum(weight_targets),)
            start_idx = end_idx

        # NOTE(review): avg_factor is accumulated but not used in the final sum
        # below; kept for parity with the reference implementation.
        avg_factor = self.stack(avg_factor)
        avg_factor = self.reduce_sum(avg_factor)

        losses_bbox = self.stack(losses_bbox)
        losses_dfl = self.stack(losses_dfl)
        losses_qfl = self.stack(losses_qfl)

        loss_qlf = self.reduce_sum(losses_qfl)
        loss_bbox = self.reduce_sum(losses_bbox)
        loss_dfl = self.reduce_sum(losses_dfl)

        loss = loss_qlf + loss_bbox + loss_dfl
        return loss


class DepthwiseConvModule(nn.Cell):
    """Depthwise-separable conv block: depthwise conv -> BN -> LeakyReLU,
    then 1x1 pointwise conv -> BN -> LeakyReLU.

    NOTE(review): ``stride`` is honoured by the depthwise stage, but
    ``padding`` is ignored because both convs use ``pad_mode='same'``.
    """

    def __init__(
            self,
            in_channels,
            out_channels,
            kernel_size,
            stride=1,
            padding=0,
    ):
        super(DepthwiseConvModule, self).__init__()
        # group == in_channels makes the first conv depthwise.
        self.depthwise = nn.Conv2d(
            in_channels,
            in_channels,
            kernel_size,
            stride=stride,
            pad_mode='same',
            group=in_channels,
            has_bias=False,
            weight_init="normal"
        )
        self.pointwise = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=1,
            stride=1,
            pad_mode='same',
            has_bias=False,
            weight_init="normal")
        self.dwnorm = nn.BatchNorm2d(in_channels)
        self.pwnorm = nn.BatchNorm2d(out_channels)
        self.act = nn.LeakyReLU(alpha=0.1)

    def construct(self, x):
        """Apply the depthwise then pointwise stage, each with BN + activation."""
        out = self.act(self.dwnorm(self.depthwise(x)))
        out = self.act(self.pwnorm(self.pointwise(out)))
        return out


class FlattenConcat(nn.Cell):
    """
    Concatenate per-level predictions into a single tensor.

    Args:
        config (dict): The default config of nanodet.

    Returns:
        Tensor, flattened predictions of shape (batch, num_nanodet_boxes, -1).
    """

    def __init__(self, config):
        super(FlattenConcat, self).__init__()
        self.num_nanodet_boxes = config.num_nanodet_boxes
        self.concat = P.Concat(axis=1)
        self.transpose = P.Transpose()

    def construct(self, inputs):
        """Flatten each NCHW feature map to (batch, -1) and concatenate."""
        batch_size = F.shape(inputs[0])[0]
        flattened = ()
        for feature in inputs:
            # NCHW -> NHWC so channels end up innermost before flattening.
            feature = self.transpose(feature, (0, 2, 3, 1))
            flattened += (F.reshape(feature, (batch_size, -1)),)
        merged = self.concat(flattened)
        return F.reshape(merged, (batch_size, self.num_nanodet_boxes, -1))



def ShareClassRegModel(in_channel, num_anchors, kernel_size=3,
                       stride=1, pad_mod='same', num_classes=81, feature_size=96):
    """Build the shared head tower: two stacked depthwise-separable 3x3 convs.

    NOTE(review): only ``feature_size`` affects the result; the remaining
    parameters are kept for signature compatibility with callers.
    """
    tower = [
        DepthwiseConvModule(feature_size, feature_size, kernel_size=3),
        DepthwiseConvModule(feature_size, feature_size, kernel_size=3),
    ]
    return nn.SequentialCell(tower)


class MultiBox(nn.Cell):
    """
    Multibox conv layers. Each scale owns a shared tower plus 1x1 heads for
    class conf scores and localization predictions.

    Args:
        config (dict): The default config of nanodet.

    Returns:
        Tensor, localization predictions.
        Tensor, class conf scores.
    """

    def __init__(self, config):
        super(MultiBox, self).__init__()

        out_channels = config.extras_out_channels
        num_default = config.num_default
        loc_layers = []
        cls_layers = []
        share_layers = []
        for idx, channel in enumerate(out_channels):
            # 32 regression channels (consumed downstream as 4 sides x 8 bins).
            loc_layers.append(nn.Conv2d(in_channels=channel, out_channels=32,
                                        kernel_size=1,
                                        has_bias=True,
                                        weight_init="Normal",
                                        ))
            # bias_init -4.595 puts initial sigmoid outputs near 0.01.
            cls_layers.append(nn.Conv2d(in_channels=channel, out_channels=80,
                                        kernel_size=1,
                                        has_bias=True,
                                        weight_init="Normal",
                                        bias_init=-4.595))
            share_layers.append(ShareClassRegModel(in_channel=channel,
                                                   num_anchors=num_default[idx],
                                                   num_classes=config.num_classes))

        self.multi_share_layers = nn.layer.CellList(share_layers)
        self.multi_loc_layers = nn.layer.CellList(loc_layers)
        self.multi_cls_layers = nn.layer.CellList(cls_layers)
        self.flatten_concat = FlattenConcat(config)

    def construct(self, inputs):
        """Run every level through its shared tower and both prediction heads."""
        loc_outputs = ()
        cls_outputs = ()
        for i in range(len(self.multi_loc_layers)):
            shared = self.multi_share_layers[i](inputs[i])
            loc_outputs += (self.multi_loc_layers[i](shared),)
            cls_outputs += (self.multi_cls_layers[i](shared),)
        return self.flatten_concat(loc_outputs), self.flatten_concat(cls_outputs)


class TrainingWrapper(nn.Cell):
    """
    Encapsulation class of nanodet network training.

    Appends an optimizer to the training network so that construct()
    builds the backward graph and applies the gradient update.

    Args:
        network (Cell): The training network. Note that loss function should have been added.
        optimizer (Optimizer): Optimizer for updating the weights.
        sens (Number): The adjust parameter. Default: 1.0.
    """

    def __init__(self, network, optimizer, sens=1.0):
        super(TrainingWrapper, self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.weights = ms.ParameterTuple(network.trainable_params())
        self.optimizer = optimizer
        self.grad = C.GradOperation(get_by_list=True, sens_param=True)
        self.sens = sens
        self.grad_reducer = None
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        # Gradient all-reduce is only needed for data/hybrid parallel runs.
        self.reducer_flag = self.parallel_mode in [ParallelMode.DATA_PARALLEL,
                                                   ParallelMode.HYBRID_PARALLEL]
        if self.reducer_flag:
            mean = context.get_auto_parallel_context("gradients_mean")
            if auto_parallel_context().get_device_num_is_set():
                degree = context.get_auto_parallel_context("device_num")
            else:
                degree = get_group_size()
            self.grad_reducer = nn.DistributedGradReducer(optimizer.parameters, mean, degree)

    def construct(self, *args):
        """Forward pass, backward pass seeded with ``sens``, reduce, update."""
        loss = self.network(*args)
        # Seed the backward pass with a constant sensitivity tensor matching loss.
        sens = P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens)
        grads = self.grad(self.network, self.weights)(*args, sens)
        if self.reducer_flag:
            # Average/accumulate gradients across devices.
            grads = self.grad_reducer(grads)
        self.optimizer(grads)
        return loss


class ShuffleV2Block(nn.Cell):
    """ShuffleNet-V2 unit with channel split/concat and channel shuffle."""

    def __init__(self, inp, oup, stride):
        super(ShuffleV2Block, self).__init__()
        self.stride = stride
        branch_features = oup // 2

        if self.stride > 1:
            # Downsampling unit: both branches see the full input.
            branch1_layers = [
                self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
                nn.BatchNorm2d(inp),
                nn.Conv2d(inp, branch_features, kernel_size=1, padding=0, pad_mode='pad', has_bias=False),
                nn.BatchNorm2d(branch_features),
                nn.LeakyReLU(alpha=0.1),
            ]
            self.branch1 = nn.SequentialCell(branch1_layers)
        else:
            # Stride-1 unit: branch1 is identity on its half of the channels.
            self.branch1 = nn.SequentialCell()

        branch2_in = inp if (self.stride > 1) else branch_features
        branch2_layers = [
            nn.Conv2d(
                branch2_in,
                branch_features,
                kernel_size=1,
                stride=1,
                padding=0,
                has_bias=False,
            ),
            nn.BatchNorm2d(branch_features),
            nn.LeakyReLU(alpha=0.1),
            self.depthwise_conv(
                branch_features,
                branch_features,
                kernel_size=3,
                stride=self.stride,
                padding=1,
            ),
            nn.BatchNorm2d(branch_features),
            nn.Conv2d(
                branch_features,
                branch_features,
                kernel_size=1,
                stride=1,
                padding=0,
                has_bias=False,
            ),
            nn.BatchNorm2d(branch_features),
            nn.LeakyReLU(alpha=0.1),
        ]
        self.branch2 = nn.SequentialCell(branch2_layers)

    @staticmethod
    def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
        """Per-channel convolution: group count equals input channels."""
        return nn.Conv2d(i, o, kernel_size, stride, "pad", padding, group=i, has_bias=bias)

    def construct(self, x):
        if self.stride == 1:
            # Split channels: one half passes through, the other feeds branch2.
            x1, x2 = P.Split(axis=1, output_num=2)(x)
            out = P.Concat(axis=1)((x1, self.branch2(x2)))
        else:
            out = P.Concat(axis=1)((self.branch1(x), self.branch2(x)))
        return channel_shuffle(out, 2)


def channel_shuffle(x, groups):
    """Interleave channels across ``groups`` so information mixes between branches."""
    batchsize, num_channels, height, width = x.shape
    channels_per_group = num_channels // groups
    # (N, C, H, W) -> (N, g, C/g, H, W) -> swap the group axes -> flatten back.
    grouped = P.Reshape()(x, (batchsize, groups, channels_per_group, height, width))
    shuffled = P.Transpose()(grouped, (0, 2, 1, 3, 4))
    return P.Reshape()(shuffled, (batchsize, -1, height, width))


class ShuffleNetV2(nn.Cell):
    """ShuffleNet-V2 backbone returning the stage-2/3/4 feature maps.

    Args:
        model_size (str): Width multiplier key: '0.5x', '1.0x', '1.5x',
            '2.0x' or '3.0x'. Default: '1.0x'.
        ckpt_path (str): Path of the pretrained checkpoint loaded during
            initialization. Defaults to the original hard-coded path for
            backward compatibility; pass ``None`` to skip loading (e.g.
            training from scratch or when the file is unavailable).

    Raises:
        NotImplementedError: for an unknown ``model_size``.
    """

    def __init__(self, model_size='1.0x',
                 ckpt_path="/home/work/user-job-dir/V0001/shufflenetV2_x1.ckpt"):
        super(ShuffleNetV2, self).__init__()
        print('model size is ', model_size)

        self.stage_repeats = [4, 8, 4]
        self.model_size = model_size
        # stage_out_channels[0] is a placeholder; [1] is the stem width.
        if model_size == '0.5x':
            self.stage_out_channels = [-1, 24, 48, 96, 192, 1024]
        elif model_size == '1.0x':
            self.stage_out_channels = [-1, 24, 116, 232, 464, 1024]
        elif model_size == '1.5x':
            self.stage_out_channels = [-1, 24, 176, 352, 704, 1024]
        elif model_size == '2.0x':
            self.stage_out_channels = [-1, 24, 244, 488, 976, 2048]
        elif model_size == '3.0x':
            self.stage_out_channels = [-1, 24, 512, 1024, 2048, 2048]
        else:
            raise NotImplementedError

        # Stem: 3x3 stride-2 conv, then an explicitly padded 3x3 stride-2 max-pool.
        input_channel = self.stage_out_channels[1]
        self.conv1 = nn.SequentialCell([
            nn.Conv2d(in_channels=3, out_channels=input_channel, kernel_size=3, stride=2,
                      pad_mode='pad', padding=1, has_bias=False),
            nn.BatchNorm2d(num_features=input_channel, momentum=0.9),
            nn.LeakyReLU(alpha=0.1),
        ])

        self.pad = nn.Pad(((0, 0), (0, 0), (1, 1), (1, 1)), "CONSTANT")
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2)

        self.features = []
        for idxstage in range(len(self.stage_repeats)):
            feature = []
            numrepeat = self.stage_repeats[idxstage]
            output_channel = self.stage_out_channels[idxstage + 2]

            for i in range(numrepeat):
                # The first block of each stage downsamples (stride 2).
                if i == 0:
                    feature.append(ShuffleV2Block(input_channel, output_channel, stride=2))
                else:
                    feature.append(ShuffleV2Block(output_channel, output_channel, stride=1))

                input_channel = output_channel
            self.features.append(feature)
        self.stage2 = nn.SequentialCell([*self.features[0]])
        self.stage3 = nn.SequentialCell([*self.features[1]])
        self.stage4 = nn.SequentialCell([*self.features[2]])
        self._initialize_weights(ckpt_path)

    def construct(self, x):
        """Return the (C2, C3, C4) multi-scale features for images ``x``."""
        x = self.conv1(x)
        x = self.pad(x)
        x = self.maxpool(x)
        C2 = self.stage2(x)
        C3 = self.stage3(C2)
        C4 = self.stage4(C3)
        return C2, C3, C4

    def _initialize_weights(self, ckpt_path):
        """Load pretrained parameters from ``ckpt_path`` when one is provided."""
        if ckpt_path:
            param_dict = load_checkpoint(ckpt_path)
            load_param_into_net(self, param_dict)
            print("shufflenetV2 init done!")


def shuffleNet(model_size='1.0x'):
    """Factory for the ShuffleNet-V2 backbone at the given width multiplier."""
    return ShuffleNetV2(model_size=model_size)


class NanoDet(nn.Cell):
    """NanoDet detector: backbone + bilinear-resize PAN neck + multibox head.

    Args:
        backbone (Cell): feature extractor returning three stage maps.
        config (dict): model configuration (feature sizes, head settings).
        is_training (bool): when False an inference Sigmoid op is created.
    """

    def __init__(self, backbone, config, is_training=True):
        super(NanoDet, self).__init__()
        self.backbone = backbone
        feature_size = config.feature_size
        # 1x1 lateral convs projecting the backbone channels (1.0x widths) to 96.
        self.P5_1 = nn.Conv2d(464, 96, kernel_size=1, stride=1, pad_mode='same',
                              has_bias=True,
                              weight_init="xavier_uniform",
                              bias_init="zeros")
        self.P_upsample1 = P.ResizeBilinear((feature_size[1], feature_size[1]))
        self.P4_1 = nn.Conv2d(232, 96, kernel_size=1, stride=1, pad_mode='same',
                              has_bias=True,
                              weight_init="xavier_uniform",
                              bias_init="zeros")
        self.P_upsample2 = P.ResizeBilinear((feature_size[0], feature_size[0]))
        # "Downsampling" here is also bilinear resizing, to the smaller grid.
        self.P_downSample1 = P.ResizeBilinear((feature_size[1], feature_size[1]))
        self.P_downSample2 = P.ResizeBilinear((feature_size[2], feature_size[2]))
        self.P3_1 = nn.Conv2d(116, 96, kernel_size=1, stride=1, pad_mode='same',
                              has_bias=True,
                              weight_init="xavier_uniform",
                              bias_init="zeros")
        self.multi_box = MultiBox(config)
        self.is_training = is_training
        if not is_training:
            self.activation = P.Sigmoid()

    def construct(self, x):
        """Return (pred_loc, pred_label) for input images ``x``."""
        C3, C4, C5 = self.backbone(x)
        # Lateral 1x1 projections.
        P3 = self.P3_1(C3)
        P4 = self.P4_1(C4)
        P5 = self.P5_1(C5)

        # Top-down path: upsample and add.
        P4 = self.P_upsample1(P5) + P4
        P3 = self.P_upsample2(P4) + P3

        # Bottom-up path: resize back down and add.
        P4 = self.P_downSample1(P3) + P4
        P5 = self.P_downSample2(P4) + P5

        pred_loc, pred_label = self.multi_box((P3, P4, P5))
        return pred_loc, pred_label


class NanoDetInferWithDecoder(nn.Cell):
    """
    nanodet Infer wrapper to decode the bbox locations.

    Args:
        network (Cell): the origin nanodet infer network without bbox decoder.
        default_boxes (Tensor): the default_boxes from anchor generator
        config (dict): nanodet config
    Returns:
        Tensor, the locations for bbox after decoder representing (x0, y0, x1, y1)
        Tensor, the prediction labels.

    """

    def __init__(self, network, default_boxes, config):
        super(NanoDetInferWithDecoder, self).__init__()
        self.network = network
        self.distribution_project = Integral()
        self.center_priors = default_boxes
        self.sigmoid = P.Sigmoid()
        self.expandDim = P.ExpandDims()
        self.tile = P.Tile()
        self.shape = P.Shape()
        self.stack = P.Stack(-1)

    def construct(self, x, max_shape=None):
        """Decode raw network outputs into boxes and per-class sigmoid scores."""
        default_priors = self.expandDim(self.center_priors, 0)
        reg_preds, cls_preds = self.network(x)
        # Project DFL distributions to distances, scaled by the value stored at
        # prior index 3 (presumably the per-level stride -- confirm against the
        # anchor generator).
        dis_preds = self.distribution_project(reg_preds) * self.tile(
            self.expandDim(default_priors[..., 3], -1), (1, 1, 4))
        bboxes = self.distance2bbox(default_priors[..., :2], dis_preds, max_shape)
        scores = self.sigmoid(cls_preds)
        return bboxes, scores

    def distance2bbox(self, points, distance, max_shape=None):
        """Convert per-side distances to corner boxes, optionally clamped.

        NOTE(review): when ``max_shape`` is given, the clamp bound is a
        hard-coded 320 rather than the supplied shape values -- confirm this
        matches the deployed input resolution.
        """
        x1 = points[..., 0] - distance[..., 0]
        y1 = points[..., 1] - distance[..., 1]
        x2 = points[..., 0] + distance[..., 2]
        y2 = points[..., 1] + distance[..., 3]
        if max_shape is not None:
            x1 = C.clip_by_value(x1, Tensor(0.0), Tensor(320.0))
            y1 = C.clip_by_value(y1, Tensor(0.0), Tensor(320.0))
            x2 = C.clip_by_value(x2, Tensor(0.0), Tensor(320.0))
            y2 = C.clip_by_value(y2, Tensor(0.0), Tensor(320.0))
        return self.stack([x1, y1, x2, y2])
