import json
import os.path
import time
import zipfile

import numpy as np
import requests
from mindspore import FileWriter, Tensor

# --- global configuration ---------------------------------------------------
data_path = './dataset/pedestrian_detection'  # root of the VOC-style pedestrian dataset
epochs = 10  # NOTE(review): unused below; training length comes from epoch_size
checkpoint_file_path = 'pretrained_ssd300.ckpt'
checkpoint_path = './checkpoints'
epoch_size = 1  # number of epochs actually trained
pre_trained = os.path.join(checkpoint_path, checkpoint_file_path)  # pretrained SSD300 weights
mode_sink = ''  # set to 'sink' to enable dataset sink mode on non-CPU targets
voc_json = os.path.join(data_path, 'classes.json')  # COCO-style eval annotation output
load_ckpt = True  # load the pretrained checkpoint and run the training loop
batch_size = 2


def download_and_unzip(url, path):
    """Download a zip archive from ``url`` into ``path`` and extract it there.

    Prints a simple textual progress bar while downloading.

    Args:
        url: HTTP(S) location of the archive.
        path: destination directory (created if missing).

    Raises:
        RuntimeError: if the server does not answer with HTTP 200.
    """
    # makedirs with exist_ok also creates intermediate directories (mkdir did not).
    os.makedirs(path, exist_ok=True)
    file_path = os.path.join(path, 'case10.zip')
    start = time.time()
    # SECURITY NOTE(review): credentials are hard-coded in source; move them to
    # environment variables or a config file before publishing this script.
    user, password = 'tyx_neu', 'Sportlab307'
    resp = requests.get(url, auth=(user, password), stream=True)
    # Fix: the original silently fell through on a failed request and then
    # crashed later inside zipfile with a confusing error.
    if resp.status_code != 200:
        raise RuntimeError(f'download failed with HTTP status {resp.status_code}')

    size = 0
    chunk_size = 1024
    # Fix: 'content-length' may be absent (chunked encoding); the original
    # raised KeyError. Fall back to 0 and skip the percentage display.
    content_size = int(resp.headers.get('content-length', 0))
    print('start download [file size]:{size:.2f}MB'.format(size=content_size / chunk_size / 1024))
    with open(file_path, 'wb') as out_file:
        for data in resp.iter_content(chunk_size=chunk_size):
            out_file.write(data)
            size += len(data)
            if content_size:
                print(
                    '\r [下载进度]:%s%.2f%%' % ('>' * int(size * 50 / content_size), float(size / content_size * 100)),
                    end=' ')
    end = time.time()
    print('\r download completed time:%.2f秒' % (end - start))

    # Extract everything next to the downloaded archive; the context manager
    # guarantees the zip handle is closed even on a corrupt archive.
    with zipfile.ZipFile(file_path) as zip_file:
        zip_file.extractall(path)
    print('successfully unzip')


# Download and extract the pedestrian-detection dataset archive into ./dataset.
url = 'https://openi.pcl.ac.cn/attachments/486f8fa8-b022-4399-8049-d89c28e695ba?type=1'
download_and_unzip(url, os.path.join(os.getcwd(), 'dataset'))

import xml.etree.ElementTree as et

# Detection classes: index 0 is the background class, per SSD convention.
classes = ['background', 'person']
voc_json = os.path.join(data_path, 'classes.json')  # NOTE(review): re-assigns the same value as above


def get_imageId_from_fileName(filename, id_iter):
    """Derive an integer image id from a file name.

    Numeric stems (e.g. '000123.jpg') map to their integer value; any other
    name falls back to the running counter ``id_iter``.
    """
    stem = os.path.splitext(filename)[0]
    return int(stem) if stem.isdigit() else id_iter


def create_voc_label(is_training, is_testing):
    """Parse VOC-style XML annotations for the selected dataset split.

    Args:
        is_training: use the 'train' split.
        is_testing: use the 'test' split (when not training).

    Returns:
        images: list of image ids that have at least one valid box.
        image_files_dict: {img_id: image path}.
        image_anno_dict: {img_id: np.array of [ymin, xmin, ymax, xmax, class_id]}.

    In eval mode (neither training nor testing) a COCO-style json file is also
    written so that mAP can be computed later.

    Raises:
        FileNotFoundError: if the split's Images/ or Annotations/ dir is missing.
    """
    voc_root = data_path
    cls_map = {name: i for i, name in enumerate(classes)}
    if is_training:
        sub_dir = 'train'
    elif is_testing:
        sub_dir = 'test'
    else:
        sub_dir = 'eval'
    voc_dir = os.path.join(voc_root, sub_dir)

    # Fix: the original bound image_dir only when the directory did NOT exist
    # (inverted check) and anno_dir only when it did, so one of them was
    # normally undefined and crashed with NameError. Bind both and fail fast.
    image_dir = os.path.join(voc_dir, 'Images')
    anno_dir = os.path.join(voc_dir, 'Annotations')
    if not os.path.isdir(image_dir) or not os.path.isdir(anno_dir):
        raise FileNotFoundError(f'expected Images/ and Annotations/ directories under {voc_dir}')

    if not (is_training or is_testing):
        # NOTE(review): voc_json already contains data_path, so this join
        # duplicates the prefix — confirm against the eval-side reader.
        json_file = os.path.join(voc_root, voc_json)
        file_dir = os.path.split(json_file)[0]
        if not os.path.isdir(file_dir):
            os.makedirs(file_dir)
        json_dict = {'images': [], 'type': 'instances', 'annotations': [], 'categories': []}

        bnd_id = 1
    image_files_dict = {}
    image_anno_dict = {}
    images = []
    id_iter = 0
    for anno_file in os.listdir(anno_dir):
        print(anno_file)
        if not anno_file.endswith('xml'):
            continue
        tree = et.parse(os.path.join(anno_dir, anno_file))
        root_node = tree.getroot()
        file_name = root_node.find('filename').text
        img_id = get_imageId_from_fileName(file_name, id_iter)
        id_iter += 1
        image_path = os.path.join(image_dir, file_name)
        print(image_path)

        # Skip annotations whose image is missing on disk.
        if not os.path.isfile(image_path):
            print(f'cannot find image {file_name} according to annotations.')
            continue

        labels = []
        for obj in root_node.iter('object'):
            cls_name = obj.find('name').text
            if cls_name not in cls_map:
                print(f'label"{cls_name}"not in "{classes}"')
                continue
            bnd_box = obj.find('bndbox')
            # VOC coordinates are 1-based; convert to 0-based pixels.
            x_min = int(float(bnd_box.find('xmin').text)) - 1
            y_min = int(float(bnd_box.find('ymin').text)) - 1
            x_max = int(float(bnd_box.find('xmax').text)) - 1
            y_max = int(float(bnd_box.find('ymax').text)) - 1
            labels.append([y_min, x_min, y_max, x_max, cls_map[cls_name]])

            if not (is_training or is_testing):
                # COCO bbox format is [x, y, width, height].
                o_width = abs(x_max - x_min)
                o_height = abs(y_max - y_min)
                ann = {'area': o_width * o_height, 'iscrowd': 0, 'image_id': img_id,
                       'bbox': [x_min, y_min, o_width, o_height], 'category_id': cls_map[cls_name], 'id': bnd_id,
                       'ignore': 0, 'segmentation': []}
                json_dict['annotations'].append(ann)
                bnd_id = bnd_id + 1

        if labels:
            images.append(img_id)
            image_files_dict[img_id] = image_path
            image_anno_dict[img_id] = np.array(labels)

        if not (is_training or is_testing):
            size = root_node.find('size')
            width = int(size.find('width').text)
            height = int(size.find('height').text)
            image = {'file_name': file_name, 'height': height, 'width': width, 'id': img_id}
            json_dict['images'].append(image)
    if not (is_training or is_testing):
        for cls_name, cid in cls_map.items():
            cat = {'supercategory': 'none', 'id': cid, 'name': cls_name}
            json_dict['categories'].append(cat)
        # Context manager guarantees the json file is closed on error.
        with open(json_file, 'w') as json_fp:
            json_fp.write(json.dumps(json_dict))
    return images, image_files_dict, image_anno_dict


def voc_data_to_mindrecord(mindrecord_dir, is_testing, is_training, prefix='ssd.mindrecord', file_num=8):
    """Convert the parsed VOC annotations into sharded MindRecord files.

    Args:
        mindrecord_dir: output directory for the record shards.
        is_testing / is_training: forwarded to create_voc_label to pick a split.
        prefix: record file name prefix.
        file_num: number of shards to write.
    """
    mindrecord_path = os.path.join(mindrecord_dir, prefix)
    writer = FileWriter(mindrecord_path, file_num)
    images, image_path_dict, image_anno_dict = create_voc_label(is_training, is_testing)

    # Record schema: image id, raw encoded image bytes and an N x 5 box array.
    ssd_json = {
        'img_id': {'type': 'int32', 'shape': [1]},
        'image': {'type': 'bytes'},
        'annotation': {'type': 'int32', 'shape': [-1, 5]}}
    writer.add_schema(ssd_json, 'ssd_json')

    for img_id in images:
        image_path = image_path_dict[img_id]
        with open(image_path, 'rb') as f:
            img = f.read()
        annos = np.array(image_anno_dict[img_id], dtype=np.int32)
        img_id = np.array([img_id], dtype=np.int32)
        row = {'img_id': img_id, 'image': img, 'annotation': annos}
        # Fix: the original called the undefined name `gan([row])`; write the
        # row through the MindRecord writer instead.
        writer.write_raw_data([row])

    writer.commit()


def create_mindrecord(dataset='voc', prefix='ssd.mindrecord', is_training=True, is_testing=False):
    """Create (if absent) and return the path of the first MindRecord shard.

    Only the 'voc' dataset layout is supported; an existing record file is
    reused without rebuilding.
    """
    print('Start create dataset')

    mindrecord_dir = data_path
    print(mindrecord_dir)
    # FileWriter emits numbered shards; the '0' shard marks a finished build.
    mindrecord_file = os.path.join(mindrecord_dir, prefix + '0')
    if not os.path.exists(mindrecord_file):
        if not os.path.isdir(mindrecord_dir):
            os.makedirs(mindrecord_dir)
        if dataset == 'voc':
            voc_root = data_path
            if os.path.isdir(voc_root):
                print('create mindrecord.')
                voc_data_to_mindrecord(mindrecord_dir, is_training=is_training, prefix=prefix, is_testing=is_testing)
                print('create mindrecord done,at {}'.format(mindrecord_dir))
            else:
                # Fix: message typo ('not exits').
                print('voc_root does not exist.')
    return mindrecord_file


def intersect(box_a, box_b):
    """Intersection area between every box in ``box_a`` and the single ``box_b``.

    Boxes are corner-form [ymin, xmin, ymax, xmax]; returns a 1-D array of areas.
    """
    upper = np.minimum(box_a[:, 2:4], box_b[2:4])
    lower = np.maximum(box_a[:, :2], box_b[:2])
    extent = np.clip(upper - lower, a_min=0, a_max=np.inf)
    return extent[:, 0] * extent[:, 1]


def jaccard_numpy(box_a, box_b):
    """IoU (Jaccard index) between each row of ``box_a`` and the box ``box_b``."""
    overlap = intersect(box_a, box_b)
    areas_a = (box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return overlap / (areas_a + area_b - overlap)


def _rand(a=0, b=1.):
    return np.random.rand() * (b - a) + a


def random_sampel_crop(image, boxes):
    """SSD-style random crop keeping boxes whose centers survive the crop.

    Args:
        image: HWC array.
        boxes: rows of [ymin, xmin, ymax, xmax, label] in image pixels.

    Returns:
        (image, boxes) either untouched or cropped, with surviving boxes
        clipped to and translated into crop coordinates.
    """
    height, width, _ = image.shape
    # None means "keep the original sample" for this call.
    min_iou = np.random.choice([None, 0.1, 0.3, 0.5, 0.7, 0.9])

    if min_iou is None:
        return image, boxes

    for _ in range(50):
        image_t = image
        w = _rand(0.3, 1.0) * width
        h = _rand(0.3, 1.0) * height

        # Keep the crop aspect ratio between 1:2 and 2:1.
        if h / w < 0.5 or h / w > 2:
            continue
        left = _rand() * (width - w)
        top = _rand() * (height - h)

        rect = np.array([int(top), int(left), int(top + h), int(left + w)])
        overlap = jaccard_numpy(boxes, rect)

        drop_mask = overlap > 0
        if not drop_mask.any():
            continue

        if overlap[drop_mask].min() < min_iou and overlap[drop_mask].max() > (min_iou + 0.2):
            continue

        image_t = image_t[rect[0]:rect[2], rect[1]:rect[3], :]
        centers = (boxes[:, :2] + boxes[:, 2:4]) / 2.0

        # Fix: missing parentheses in the original multiplied a coordinate by a
        # boolean; a box is kept only if its center lies inside the crop rect.
        m1 = (rect[0] < centers[:, 0]) * (rect[1] < centers[:, 1])
        m2 = (rect[2] > centers[:, 0]) * (rect[3] > centers[:, 1])

        mask = m1 * m2 * drop_mask
        if not mask.any():
            continue

        boxes_t = boxes[mask, :].copy()
        # Fix: clip surviving boxes to the crop window and shift them into crop
        # coordinates. The original wrote only column 2 and called np.minimum
        # with a single argument (TypeError).
        boxes_t[:, :2] = np.maximum(boxes_t[:, :2], rect[:2])
        boxes_t[:, :2] -= rect[:2]
        boxes_t[:, 2:4] = np.minimum(boxes_t[:, 2:4], rect[2:4])
        boxes_t[:, 2:4] -= rect[:2]
        return image_t, boxes_t
    return image, boxes


import itertools as it
import math

# --- SSD anchor (default box) configuration ---------------------------------
num_ssd_boxes = 1917  # total default boxes over all feature maps
steps = [16, 32, 64, 100, 150, 300]  # feature-map strides w.r.t. the 300x300 input
img_shape = [300, 300]
match_threshold = 0.5  # IoU threshold for anchor/ground-truth matching
min_scale = 0.2
max_scale = 0.95
aspect_ratios = [[], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]]
num_default = [3, 6, 6, 6, 6, 6]  # default boxes per cell, per feature map
feature_sizes = [19, 10, 5, 3, 2, 1]


class GeneratDefaultBoxes():
    """Generate the SSD default (anchor) boxes for every feature-map cell.

    Produces two float32 arrays of shape (num_ssd_boxes, 4), coordinates
    normalized to [0, 1]:
      - default_boxes: center form [cy, cx, h, w]
      - default_boxes_tlbr: corner form [ymin, xmin, ymax, xmax]
    """
    def __init__(self):
        # Number of cells per unit length of the input for each feature map.
        fk = img_shape[0] / np.array(steps)
        scale_rate = (max_scale - min_scale) / (len(num_default) - 1)
        # Linearly spaced scales per map, plus 1.0 as the upper bound for sk3.
        scales = [min_scale + scale_rate * i for i in range(len(num_default))] + [1.0]
        self.default_boxes = []
        for idex, feature_size in enumerate(feature_sizes):
            sk1 = scales[idex]
            sk2 = scales[idex + 1]
            sk3 = math.sqrt(sk1 * sk2)
            if idex == 0 and not aspect_ratios[idex]:
                # First map: one small square plus a 2:1 / 1:2 rectangle pair.
                w, h = sk1 * math.sqrt(2), sk1 / math.sqrt(2)
                all_sizes = [(0.1, 0.1), (w, h), (h, w)]
            else:
                # Square at sk1, a rectangle pair per aspect ratio, square at sk3.
                all_sizes = [(sk1, sk1)]
                for aspect_ratio in aspect_ratios[idex]:
                    w, h = sk1 * math.sqrt(aspect_ratio), sk1 / math.sqrt(aspect_ratio)
                    all_sizes.append((w, h))
                    all_sizes.append((h, w))
                all_sizes.append((sk3, sk3))

            assert len(all_sizes) == num_default[idex]

            # One box of each size centered on every cell of this feature map.
            for i, j in it.product(range(feature_size), repeat=2):
                for w, h in all_sizes:
                    cx, cy = (j + 0.5) / fk[idex], (i + 0.5) / fk[idex]
                    self.default_boxes.append([cy, cx, h, w])

        def to_tlbr(cy, cx, h, w):
            # Center form -> top-left/bottom-right corner form.
            return cy - h / 2, cx - w / 2, cy + h / 2, cx + w / 2

        self.default_boxes_tlbr = np.array(tuple(to_tlbr(*i) for i in self.default_boxes), dtype='float32')
        self.default_boxes = np.array(self.default_boxes, dtype='float32')


# Fix: build the default-box generator once (the original instantiated it three
# times), correct the `default_boxe` attribute typo, and split the tlbr corners
# column-wise — tuple-unpacking a (1917, 4) array raised
# "too many values to unpack".
_box_generator = GeneratDefaultBoxes()
default_boxes_tlbr = _box_generator.default_boxes_tlbr
default_boxes = _box_generator.default_boxes
y1, x1, y2, x2 = np.split(default_boxes_tlbr[:, :4], 4, axis=-1)
vol_anchors = (x2 - x1) * (y2 - y1)  # per-anchor area, shape (num_ssd_boxes, 1)
matching_threshold = match_threshold

prior_scaling = [0.1, 0.2]  # variance-style scaling for center / size offsets


def ssd_bboxes_encode(boxes):
    """Match ground-truth boxes to default anchors and encode regression targets.

    Args:
        boxes: rows of [ymin, xmin, ymax, xmax, label]; coordinates are
            presumably normalized to [0, 1] by preprocess_fn — TODO confirm.

    Returns:
        bboxes: (num_ssd_boxes, 4) encoded [cy, cx, h, w] offsets per anchor.
        t_label: (num_ssd_boxes,) int32 class id per anchor, 0 = background.
        num_match: int32 array [n] holding the matched-anchor count.
    """
    def jaccard_with_anchors(bbox):
        # IoU of one ground-truth box against every default anchor (corner form).
        ymin = np.maximum(y1, bbox[0])
        xmin = np.maximum(x1, bbox[1])
        ymax = np.minimum(y2, bbox[2])
        xmax = np.minimum(x2, bbox[3])

        w = np.maximum(xmax - xmin, 0.0)
        h = np.maximum(ymax - ymin, 0.0)

        inter_vol = h * w
        union_vol = vol_anchors + (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) - inter_vol
        jaccard = inter_vol / union_vol
        return np.squeeze(jaccard)

    pre_scores = np.zeros((num_ssd_boxes), dtype=np.float32)
    t_boxes = np.zeros((num_ssd_boxes, 4), dtype=np.float32)
    t_label = np.zeros((num_ssd_boxes), dtype=np.int64)
    for bbox in boxes:
        label = int(bbox[4])
        scores = jaccard_with_anchors(bbox)
        idx = np.argmax(scores)
        # Force-match the best anchor for this gt box (2.0 beats any real IoU).
        scores[idx] = 2.0
        mask = (scores > matching_threshold)
        # Keep an anchor's previous assignment when it matched a better gt box.
        mask = mask & (scores > pre_scores)
        pre_scores = np.maximum(pre_scores, scores * mask)
        t_label = mask * label + (1 - mask) * t_label
        for i in range(4):
            t_boxes[:, i] = mask * bbox[i] + (1 - mask) * t_boxes[:, i]

    index = np.nonzero(t_label)

    # Corner form -> center form [cy, cx, h, w].
    bboxes = np.zeros((num_ssd_boxes, 4), dtype=np.float32)
    bboxes[:, [0, 1]] = (t_boxes[:, [0, 1]] + t_boxes[:, [2, 3]]) / 2
    bboxes[:, [2, 3]] = t_boxes[:, [2, 3]] - t_boxes[:, [0, 1]]

    # Encode matched anchors only: center offsets scaled by anchor size, sizes
    # as log-ratios, both divided by the prior scaling factors.
    bboxes_t = bboxes[index]
    default_boxes_t = default_boxes[index]

    bboxes_t[:, :2] = (bboxes_t[:, :2] - default_boxes_t[:, :2]) / (default_boxes_t[:, 2:] * prior_scaling[0])
    # Floor the size ratio to avoid log(0).
    tmp = np.maximum(bboxes_t[:, 2:4] / default_boxes_t[:, 2:4], 0.000001)

    bboxes_t[:, 2:4] = np.log(tmp) / prior_scaling[1]
    bboxes[index] = bboxes_t

    num_match = np.array([len(np.nonzero(t_label)[0])], dtype=np.int32)
    return bboxes, t_label.astype(np.int32), num_match


import cv2


def preprocess_fn(img_id, image, box, is_training):
    """Per-sample preprocessing for the dataset pipeline.

    Training: random crop, resize to 300x300, random horizontal flip, box
    normalization to [0, 1] and anchor encoding via ssd_bboxes_encode.
    Inference: resize only, also returning the original (h, w) for rescaling.
    """
    cv2.setNumThreads(2)

    def _infer_data(image, input_shape):
        # Resize for inference and keep the original size for box rescaling.
        img_h, img_w, _ = image.shape
        input_h, input_w = input_shape

        image = cv2.resize(image, (input_w, input_h))

        # Promote grayscale images to 3 channels.
        if len(image.shape) == 2:
            image = np.expand_dims(image, axis=-1)
            image = np.concatenate([image, image, image], axis=-1)

        return img_id, image, np.array((img_h, img_w), np.float32)

    def _data_aug(image, box, is_training, image_size=(300, 300)):
        # Training-time augmentation; falls through to _infer_data otherwise.
        ih, iw, _ = image.shape
        h, w = image_size

        if not is_training:
            return _infer_data(image, image_size)

        box = box.astype(np.float32)
        image, box = random_sampel_crop(image, box)
        # Re-read the size: the crop may have changed it.
        ih, iw, _ = image.shape

        image = cv2.resize(image, (w, h))

        flip = _rand() < 0.5
        if flip:
            image = cv2.flip(image, 1, dst=None)

        # Promote grayscale images to 3 channels.
        if len(image.shape) == 2:
            image = np.expand_dims(image, axis=-1)
            image = np.concatenate([image, image, image], axis=-1)

        # Normalize [ymin, xmin, ymax, xmax] by the (cropped) image size.
        box[:, [0, 2]] = box[:, [0, 2]] / ih
        box[:, [1, 3]] = box[:, [1, 3]] / iw

        if flip:
            # Mirror the x coordinates (swap and reflect xmin/xmax).
            box[:, [1, 3]] = 1 - box[:, [3, 1]]
        box, label, num_match = ssd_bboxes_encode(box)
        return image, box, label, num_match

    return _data_aug(image, box, is_training, image_size=[300, 300])


import multiprocessing
import mindspore.dataset as de


def create_ssd_dataset(mindrecord_file, batch_size=1, device_num=1, rank=0, is_training=True, num_parallel_workers=4,
                       use_multiprocessing=True, is_testing=False):
    """Build the SSD train/eval data pipeline on top of a MindRecord file.

    Args:
        mindrecord_file: path of the first MindRecord shard.
        batch_size: batch size (drop_remainder=True).
        device_num / rank: sharding parameters for distributed training.
        is_training / is_testing: select augmentation and output columns.
        num_parallel_workers: map-op parallelism, capped at the CPU count.
        use_multiprocessing: run python map functions in subprocesses.

    Returns:
        A batched mindspore dataset.
    """
    cores = multiprocessing.cpu_count()
    if cores < num_parallel_workers:
        print('the num_parallel_workers {} is set too large,now set it {}'.format(num_parallel_workers, cores))
        num_parallel_workers = cores

    # All three modes read the same columns; the original duplicated this call
    # verbatim in each branch.
    ds = de.MindDataset(mindrecord_file, columns_list=['img_id', 'image', 'annotation'],
                        num_shards=device_num, shard_id=rank, num_parallel_workers=num_parallel_workers)

    decode = de.vision.c_transforms.Decode()
    # Fix: dataset ops are not in-place — the original discarded the decoded
    # dataset by not assigning the map() result.
    ds = ds.map(operations=decode, input_columns=['image'])
    change_swap_op = de.vision.c_transforms.HWC2CHW()
    normalize_op = de.vision.c_transforms.Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
                                                    std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
    color_adjust_op = de.vision.c_transforms.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4)

    if is_training or is_testing:
        # Both flags enable the augmentation path inside preprocess_fn.
        aug_flag = is_training if is_training else is_testing
        compose_map_func = (lambda img_id, image, annotation: preprocess_fn(img_id, image, annotation, aug_flag))
        output_columns = ['image', 'box', 'label', 'num_match']
        trans = [color_adjust_op, normalize_op, change_swap_op]
    else:
        compose_map_func = (lambda img_id, image, annotation: preprocess_fn(img_id, image, annotation, is_training))
        # Fix: this branch bound `out_columns`, leaving `output_columns`
        # undefined (NameError) for the eval pipeline.
        output_columns = ['img_id', 'image', 'image_shape']
        trans = [normalize_op, change_swap_op]

    ds = ds.map(operations=compose_map_func, input_columns=['img_id', 'image', 'annotation'],
                output_columns=output_columns, column_order=output_columns,
                python_multiprocessing=use_multiprocessing,
                num_parallel_workers=num_parallel_workers)
    # Fix: the keyword was misspelled `python_multiprocess`.
    ds = ds.map(operations=trans, input_columns=['image'], python_multiprocessing=use_multiprocessing,
                num_parallel_workers=num_parallel_workers)

    ds = ds.batch(batch_size, drop_remainder=True)
    return ds


# --- runtime setup -----------------------------------------------------------
rank = 0  # single-device training
device_num = 1
loss_scale = 1.0  # NOTE(review): overwritten to 1024 later, before the optimizer is built
from mindspore import context

device_target = context.get_context('device_target')
# Build (or reuse) the MindRecord files, then the training dataset on top.
mindrecord_file = create_mindrecord('voc', 'ssd.mindrecord', is_training=True, is_testing=False)
use_multiprocessing = (device_target != 'CPU')

if device_target == 'CPU':
    print('because your device target is cpu, no multiprocessing ')
dataset = create_ssd_dataset(mindrecord_file, batch_size=batch_size, device_num=device_num, rank=rank,
                             use_multiprocessing=use_multiprocessing)
dataset_size = dataset.get_dataset_size()
print(f'Create dataset done! dataset size is {dataset_size}')

import mindspore.nn as nn


def _bn(channel):
    """BatchNorm2d with the SSD defaults used throughout this model."""
    return nn.BatchNorm2d(
        channel,
        eps=1e-3,
        momentum=0.97,
        gamma_init=1,
        beta_init=0,
        moving_mean_init=0,
        moving_var_init=1,
    )


def _conv2d(in_channel, out_channel, kernel_size=3, stride=1, pad_mod='same'):
    """Biased convolution with 'same' padding by default.

    The parameter keeps its historical name `pad_mod` for caller compatibility.
    """
    # Fix: mindspore.nn.Conv2d's keyword is `pad_mode`; the original forwarded
    # the unknown keyword `pad_mod`, raising a TypeError at construction.
    return nn.Conv2d(in_channel, out_channel, kernel_size=kernel_size, stride=stride, padding=0, pad_mode=pad_mod,
                     has_bias=True)


class ConvBNReLU(nn.Cell):
    """Conv2d -> BatchNorm -> ReLU6 (depthwise when groups > 1)."""

    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1, shared_conv=None):
        super(ConvBNReLU, self).__init__()
        padding = 0
        in_channels = in_planes
        out_channels = out_planes
        if shared_conv is None:
            if groups == 1:
                conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, pad_mode='same', padding=padding)
            else:
                # Depthwise: one filter per input channel.
                out_channels = in_planes
                # Fix: mindspore.nn.Conv2d's keyword is `group` (as used by
                # _last_conv2d below), not torch-style `groups`.
                conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, pad_mode='same', padding=padding,
                                 group=in_channels)
            layers = [conv, _bn(out_channels), nn.ReLU6()]
        else:
            layers = [shared_conv, _bn(out_planes), nn.ReLU6()]
        self.features = nn.SequentialCell(layers)

    def construct(self, x):
        output = self.features(x)
        return output


import mindspore.ops as ops


class InvertedResidual(nn.Cell):
    """MobileNetV2 inverted residual: 1x1 expand -> depthwise -> 1x1 project.

    A residual shortcut is used when stride == 1 and inp == oup; ``last_relu``
    optionally applies ReLU6 to the block output.
    """

    def __init__(self, inp, oup, stride, expand_ratio, last_relu=False):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]

        expanded = int(round(inp * expand_ratio))
        self.use_res_connect = stride == 1 and inp == oup

        blocks = []
        if expand_ratio != 1:
            # Pointwise expansion.
            blocks.append(ConvBNReLU(inp, expanded, kernel_size=1))
        # Depthwise conv followed by a linear 1x1 projection (no activation).
        blocks.append(ConvBNReLU(expanded, expanded, stride=stride, groups=expanded))
        blocks.append(nn.Conv2d(expanded, oup, kernel_size=1, stride=1, has_bias=False))
        blocks.append(_bn(oup))
        self.conv = nn.SequentialCell(blocks)
        self.cast = ops.Cast()
        self.last_relu = last_relu
        self.relu = nn.ReLU6()

    def construct(self, x):
        shortcut = x
        out = self.conv(x)
        if self.use_res_connect:
            out = shortcut + out
        if self.last_relu:
            out = self.relu(out)
        return out


def _make_divisible(v, divisor, min_value=None):
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v


class SSDWithMobileNetV2(nn.Cell):
    """MobileNetV2 feature extractor producing the two SSD backbone outputs.

    construct() returns:
      - the expanded (pre-projection) activation of layer 13, and
      - the final 1280-channel feature map.
    """
    def __init__(self, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
        super(SSDWithMobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280

        # Rows are [expand_ratio t, output channels c, repeats n, stride s].
        # NOTE(review): the canonical MobileNetV2 first row uses c=16; with 15
        # here _make_divisible rounds it to 16 anyway at width_mult=1.0.
        if inverted_residual_setting is None:
            inverted_residual_setting = [
                [1, 15, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1]
            ]
        if len(inverted_residual_setting[0]) != 4:
            raise ValueError('inverted_residual_setting should be non-empty or a'
                             ' 4-element list ,got{}'.format(inverted_residual_setting))

        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        features = [ConvBNReLU(3, input_channel, stride=2)]

        layer_index = 0
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                if layer_index == 13:
                    # Side branch: expansion conv mirroring block 13's expand
                    # stage, used as the first SSD feature map.
                    hidden_dim = int(round(input_channel * t))
                    self.expand_layer_conv_13 = ConvBNReLU(input_channel, hidden_dim, kernel_size=1)
                # Stride applies only to the first block of each group.
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
                layer_index += 1
        features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))

        # Split the backbone at block 13 so the side branch can tap in between.
        self.features_1 = nn.SequentialCell(features[:14])
        self.features_2 = nn.SequentialCell(features[14:])

    def construct(self, x):
        out = self.features_1(x)
        expand_layer_conv_13 = self.expand_layer_conv_13(out)
        out = self.features_2(out)

        return expand_layer_conv_13, out

    def get_out_channels(self):
        # Channel count of the final feature map.
        return self.last_channel


def ssd_mobilenet_v2(**kwargs):
    """Factory for the MobileNetV2 backbone used by SSD300."""
    backbone = SSDWithMobileNetV2(**kwargs)
    return backbone


class FlattenConcat(nn.Cell):
    """Flatten each multi-scale prediction map and concatenate over anchors.

    Output shape: (batch, num_ssd_boxes, -1).
    """

    def __init__(self):
        super(FlattenConcat, self).__init__()
        self.num_ssd_boxes = 1917
        # Fix: the concat primitive class is ops.Concat; the original called
        # the functional ops.concat with only an axis, which is not a
        # constructor and fails at graph build time.
        self.concat = ops.Concat(axis=1)
        self.transpose = ops.Transpose()

    def construct(self, inputs):
        output = ()
        batch_size = ops.shape(inputs[0])[0]
        for x in inputs:
            # NCHW -> NHWC so per-cell anchor predictions are contiguous.
            x = self.transpose(x, (0, 2, 3, 1))
            output += (ops.reshape(x, (batch_size, -1)),)
        res = self.concat(output)
        return ops.reshape(res, (batch_size, self.num_ssd_boxes, -1))


def _last_conv2d(in_channel, out_channel, kernel_size=3, stride=1, pad_mod='same', pad=0):
    """Depthwise-separable head conv: depthwise kxk -> BN -> ReLU6 -> 1x1 conv."""
    depthwise = nn.Conv2d(in_channel, in_channel, kernel_size, stride,
                          pad_mode='same', padding=pad, group=in_channel)
    pointwise = _conv2d(in_channel, out_channel, kernel_size=1)
    return nn.SequentialCell([depthwise, _bn(in_channel), nn.ReLU6(), pointwise])


class MultiBox(nn.Cell):
    """Location and class prediction heads over the six SSD feature maps."""

    def __init__(self):
        super(MultiBox, self).__init__()
        # NOTE(review): 81 classes matches COCO; this dataset declares 2
        # classes — presumably kept to match the pretrained checkpoint, confirm.
        num_classes = 81
        out_channels = [576, 1280, 512, 256, 256, 128]
        num_default = [3, 6, 6, 6, 6, 6]

        loc_layers = []
        cls_layers = []
        for k, out_channel in enumerate(out_channels):
            # 4 offsets / num_classes scores per default box at each cell.
            loc_layers += [_last_conv2d(out_channel, 4 * num_default[k],
                                        kernel_size=3, stride=1, pad_mod='same', pad=0)]
            cls_layers += [_last_conv2d(out_channel, num_classes * num_default[k],
                                        kernel_size=3, stride=1, pad_mod='same', pad=0)]
        self.multi_loc_layers = nn.layer.CellList(loc_layers)
        self.multi_cls_layers = nn.layer.CellList(cls_layers)
        self.flatten_concat = FlattenConcat()

    def construct(self, inputs):
        loc_outputs = ()
        cls_outputs = ()
        for i in range(len(self.multi_loc_layers)):
            # Fix: `inputs` is a tuple of feature maps — index it; the original
            # called it like a function (`inputs(i)`), a TypeError at runtime.
            loc_outputs += (self.multi_loc_layers[i](inputs[i]),)
            cls_outputs += (self.multi_cls_layers[i](inputs[i]),)
        return self.flatten_concat(loc_outputs), self.flatten_concat(cls_outputs)


model_name = 'ssd300'

import mindspore as ms


class SSD300(nn.Cell):
    """SSD300: backbone + extra inverted-residual feature maps + MultiBox heads.

    construct() returns (locations, class predictions); in inference mode the
    class predictions are passed through a sigmoid.
    """

    def __init__(self, backbone, is_training=True):
        super(SSD300, self).__init__()

        self.backbone = backbone
        in_channels = [256, 576, 1280, 512, 256, 256]
        out_channels = [576, 1280, 512, 256, 256, 128]
        ratios = [0.2, 0.2, 0.2, 0.25, 0.5, 0.25]
        strides = [1, 1, 2, 2, 2, 2]
        residual_list = []
        # Extra feature maps beyond the two produced by the backbone.
        for i in range(2, len(in_channels)):
            residual = InvertedResidual(in_channels[i], out_channels[i], stride=strides[i],
                                        expand_ratio=ratios[i], last_relu=True)
            residual_list.append(residual)
        self.multi_residual = nn.layer.CellList(residual_list)
        # Fix: the original used `==`, comparing (and discarding) a fresh
        # MultiBox instead of assigning it — construct() then failed on a
        # missing attribute.
        self.multi_box = MultiBox()
        self.is_training = is_training
        if not is_training:
            self.activation = ops.Sigmoid()

    def construct(self, x):
        layer_out_13, output = self.backbone(x)
        multi_feature = (layer_out_13, output)
        feature = output
        for residual in self.multi_residual:
            feature = residual(feature)
            multi_feature += (feature,)
        pre_loc, pred_label = self.multi_box(multi_feature)
        if not self.is_training:
            pred_label = self.activation(pred_label)
        pre_loc = ops.cast(pre_loc, ms.float32)
        # Fix: the original cast pre_loc a second time, returning the box
        # offsets in place of the class predictions.
        pred_label = ops.cast(pred_label, ms.float32)
        return pre_loc, pred_label


from mindspore.common.initializer import initializer, TruncatedNormal


def init_net_param(network, initialize_mode='TruncatedNormal'):
    """Initialize trainable weights, skipping BN beta/gamma and bias params.

    Args:
        network: the cell whose trainable_params() are initialized.
        initialize_mode: 'TruncatedNormal' (stddev 0.02) or any initializer
            name/mode accepted by mindspore's ``initializer``.
    """
    params = network.trainable_params()
    for p in params:
        if 'beta' not in p.name and 'gamma' not in p.name and 'bias' not in p.name:
            if initialize_mode == 'TruncatedNormal':
                p.set_data(initializer(TruncatedNormal(0.02), p.data.shape, p.data.dtype))
            else:
                # Fix: set_data takes a single tensor; the original passed the
                # mode string plus shape/dtype as separate (invalid) arguments.
                p.set_data(initializer(initialize_mode, p.data.shape, p.data.dtype))


def ssd_model_build():
    """Build and initialize the SSD network selected by ``model_name``.

    Raises:
        ValueError: for unsupported model names.
    """
    if model_name == 'ssd300':
        backbone = ssd_mobilenet_v2()
        ssd = SSD300(backbone=backbone)
        init_net_param(ssd)
    else:
        # Fix: error-message typo ('config.moadel').
        raise ValueError(f'config.model: {model_name} is not supported')
    return ssd


# Build the SSD300 network with freshly initialized weights.
ssd = ssd_model_build()


class SigmoidFocalClassificationLoss(nn.Cell):
    """Sigmoid focal loss for anchor classification.

    ``gamma`` down-weights easy examples; ``alpha`` balances the contribution
    of positive vs negative anchors. Returns the per-element (unreduced) loss.
    """
    def __init__(self, gamma=2.0, alpha=0.25):
        super(SigmoidFocalClassificationLoss, self).__init__()
        self.sigmoid_cross_entropy = ops.SigmoidCrossEntropyWithLogits()
        self.sigmoid = ops.Sigmoid()

        self.pow = ops.Pow()
        self.onehot = ops.OneHot()
        self.on_value = Tensor(1.0, ms.float32)
        self.off_value = Tensor(0.0, ms.float32)
        self.gamma = gamma
        self.alpha = alpha

    def construct(self, logits, label):
        # Integer class ids -> one-hot over the logits' last dimension.
        label = self.onehot(label, ops.shape(logits)[-1], self.on_value, self.off_value)
        sigmoid_cross_entropy = self.sigmoid_cross_entropy(logits, label)
        sigmoid = self.sigmoid(logits)

        label = ops.cast(label, ms.float32)
        # p_t: model probability of the true class per element.
        p_t = label * sigmoid + (1 - label) * (1 - sigmoid)
        # (1 - p_t)^gamma shrinks the loss of well-classified examples.
        modulating_factor = self.pow(1 - p_t, self.gamma)
        alpha_weight_factor = label * self.alpha + (1 - label) * (1 - self.alpha)
        focal_loss = modulating_factor * alpha_weight_factor * sigmoid_cross_entropy
        return focal_loss


class SSDWithLossCell(nn.Cell):
    """Wraps the SSD network with its training loss.

    Loss = (focal classification loss + masked SmoothL1 localization loss)
    summed over anchors and normalized by the number of matched anchors.
    """
    def __init__(self, network):
        super(SSDWithLossCell, self).__init__()
        self.network = network
        self.less = ops.Less()
        self.tile = ops.Tile()
        self.reduce_sum = ops.ReduceSum()
        self.expand_dims = ops.ExpandDims()
        self.class_loss = SigmoidFocalClassificationLoss(2.0, 0.75)
        self.loc_loss = nn.SmoothL1Loss()

    def construct(self, x, gt_loc, gt_label, num_matched_boxes):
        pred_loc, pred_label = self.network(x)
        # Positive-anchor mask: label > 0 (0 is background).
        mask = ops.cast(self.less(0, gt_label), ms.float32)
        num_matched_boxes = self.reduce_sum(ops.cast(num_matched_boxes, ms.float32))

        # Localization loss only over positive anchors (mask broadcast to 4 coords).
        mask_loc = self.tile(self.expand_dims(mask, -1), (1, 1, 4))
        smooth_l1 = self.loc_loss(pred_loc, gt_loc) * mask_loc
        loss_loc = self.reduce_sum(self.reduce_sum(smooth_l1, -1), -1)

        # Classification loss over all anchors (focal loss handles imbalance).
        locc_cls = self.class_loss(pred_label, gt_label)
        loss_cls = self.reduce_sum(locc_cls, (1, 2))

        return self.reduce_sum((loss_cls + loss_loc) / num_matched_boxes)


# Wrap the network with its training loss.
net = SSDWithLossCell(ssd)

import math
import numpy as np


def get_lr(global_step, lr_init, lr_end, lr_max, warmup_epochs, total_epochs, steps_per_epoch):
    """Warmup + cosine-decay learning-rate schedule.

    Linearly ramps from ``lr_init`` to ``lr_max`` over the warmup epochs, then
    cosine-decays from ``lr_max`` down to ``lr_end``. Returns the (float32)
    rate for ``global_step``.
    """
    total_steps = steps_per_epoch * total_epochs
    warmup_steps = steps_per_epoch * warmup_epochs

    schedule = []
    for step in range(total_steps):
        if step < warmup_steps:
            rate = lr_init + (lr_max - lr_init) * step / warmup_steps
        else:
            progress = (step - warmup_steps) / (total_steps - warmup_steps)
            rate = lr_end + (lr_max - lr_end) * (1. + math.cos(math.pi * progress)) / 2.
        # Clamp tiny negative values from floating-point error.
        schedule.append(max(rate, 0.0))
    return np.array(schedule, dtype=np.float32)[global_step]


from mindspore.context import ParallelMode

grad_scale = ops.MultitypeFuncGraph('grad_scale')


@grad_scale.register('Tensor', 'Tensor')
def tensor_grad_scale(scale, grad):
    """Undo the loss scale: multiply each gradient by 1/scale."""
    # Fix: the reciprocal primitive is the class ops.Reciprocal; the original
    # called the functional `ops.reciprocal` with no argument.
    return grad * ops.Reciprocal()(scale)


class TrainingWrapper(nn.Cell):
    """Forward + backward + optimizer step, with optional loss scaling,
    data-parallel gradient all-reduce and global-norm clipping."""

    def __init__(self, network, optimizer, sens=1.0, use_global_norm=False):
        super(TrainingWrapper, self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.weights = ms.ParameterTuple(network.trainable_params())
        self.optimizer = optimizer
        self.grad = ops.GradOperation(get_by_list=True, sens_param=True)
        self.sens = sens
        self.reducer_flag = False
        self.grad_reducer = None
        self.use_global_norm = use_global_norm
        self.parallel_mode = ms.context.get_auto_parallel_context('parallel_mode')
        if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
            self.reducer_flag = True
        if self.reducer_flag:
            mean = ms.context.get_auto_parallel_context('gradients_mean')
            # Fix: the original called the undefined names
            # auto_parallel_context() and get_group_size() (neither imported);
            # read the device count from the auto-parallel context instead.
            degree = ms.context.get_auto_parallel_context('device_num')
            self.grad_reducer = nn.DistributedGradReducer(optimizer.parameters, mean, degree)
        self.hyper_map = ops.HyperMap()

    def construct(self, *args):
        weights = self.weights
        loss = self.network(*args)
        # Seed the backward pass with the loss-scale value.
        sens = ops.Fill()(ops.DType()(loss), ops.Shape()(loss), self.sens)
        grads = self.grad(self.network, weights)(*args, sens)
        if self.reducer_flag:
            grads = self.grad_reducer(grads)
        if self.use_global_norm:
            # Undo the loss scale before clipping by global norm.
            grads = self.hyper_map(ops.partial(grad_scale, ops.scalar_to_array(self.sens)), grads)
            grads = ops.clip_by_global_norm(grads)
        self.optimizer(grads)
        return loss


from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, LossMonitor, TimeMonitor
from mindspore.train import Model
import os

# Training hyper-parameters.
loss_scale = 1024            # static loss scale applied through the optimizer
save_checkpoint_epochs = 10
pre_trained_epoch_size = 0   # epochs already covered by the loaded checkpoint

warmup_epochs = 2
lr_init = 0.001
lr_end_rate = 0.001          # final lr is lr_end_rate * lr_max (see get_lr call below)
momentum = 0.9
weight_decay = 0.00015

# makedirs(exist_ok=True) also creates missing parents and avoids the
# check-then-create race of the original exists()+mkdir pair.
os.makedirs(output_path, exist_ok=True)
ckpt_config = CheckpointConfig(save_checkpoint_steps=500)
ckpt_save_dir = output_path
ckpoint_cb = ModelCheckpoint(prefix='ssd', directory=ckpt_save_dir, config=ckpt_config)

if load_ckpt:
    # Load pretrained SSD weights into the network.
    # NOTE(review): third positional arg of load_param_into_net is strict_load —
    # confirm True is the intended strictness here.
    param_dict = ms.load_checkpoint(pre_trained)
    ms.load_param_into_net(net, param_dict, True)

    # Learning-rate schedule, resumed from any already-trained epochs.
    lr = Tensor(get_lr(global_step=pre_trained_epoch_size * dataset_size, lr_init=lr_init, lr_end=lr_end_rate * 0.05
                       , lr_max=0.05, warmup_epochs=warmup_epochs, ))

    loss_scale = float(loss_scale)
    if device_target == 'CPU':
        # No loss scaling on CPU (plain fp32 training).
        loss_scale = 1.0
        ms.set_context(mode=ms.GRAPH_MODE, device_target='CPU')

    # BUG FIX: MindSpore Parameter exposes `requires_grad`, not `require_grad`;
    # the original attribute access raised AttributeError once the filter ran.
    opt = nn.Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, momentum, weight_decay, loss_scale)
    net = TrainingWrapper(net, opt, loss_scale)

    callback = [TimeMonitor(data_size=dataset_size), LossMonitor(per_print_times=500), ckpoint_cb]
    device_target = ms.context.get_context("device_target")
    print(device_target)
    model = Model(net)

    if mode_sink == 'sink' and device_target != "CPU":
        print('In sink mode, one epoch return a loss')
        dataset_sink_mode = True
    else:
        dataset_sink_mode = False

    print('Start train SSD,the first epoch will be slower because of the graph compilation.')
    model.train(epoch_size, dataset, callbacks=callback, dataset_sink_mode=dataset_sink_mode)


def apply_nms(all_boxes, all_scores, thres, max_boxes):
    """Greedy non-maximum suppression.

    Args:
        all_boxes: (N, 4) array of [y1, x1, y2, x2] boxes.
        all_scores: (N,) array of confidence scores.
        thres: IoU threshold; overlaps above it are suppressed.
        max_boxes: cap on the number of boxes kept.

    Returns:
        List of indices into all_boxes, in decreasing score order.
    """
    ys_min, xs_min = all_boxes[:, 0], all_boxes[:, 1]
    ys_max, xs_max = all_boxes[:, 2], all_boxes[:, 3]
    # +1 follows the inclusive pixel-coordinate area convention.
    box_areas = (xs_max - xs_min + 1) * (ys_max - ys_min + 1)

    remaining = all_scores.argsort()[::-1]  # candidate indices, best first
    selected = []
    while remaining.size > 0:
        best = remaining[0]
        selected.append(best)
        if len(selected) >= max_boxes:
            break

        rest = remaining[1:]
        # Intersection of the best box with every remaining candidate.
        overlap_x1 = np.maximum(xs_min[best], xs_min[rest])
        overlap_y1 = np.maximum(ys_min[best], ys_min[rest])
        overlap_x2 = np.minimum(xs_max[best], xs_max[rest])
        overlap_y2 = np.minimum(ys_max[best], ys_max[rest])
        inter_w = np.maximum(0.0, overlap_x2 - overlap_x1 + 1)
        inter_h = np.maximum(0.0, overlap_y2 - overlap_y1 + 1)
        intersection = inter_w * inter_h

        iou = intersection / (box_areas[best] + box_areas[rest] - intersection)
        # Keep only candidates that do not overlap the chosen box too much.
        remaining = rest[iou <= thres]
    return selected


from pycocotools.cocoeval import COCOeval
from pycocotools.coco import COCO
import json


class COCOMetrics:
    """Accumulates per-image SSD detections and scores them with pycocotools.

    Feed one image at a time via update(); get_metrics() writes the
    collected detections to 'predictions.json' and returns COCO mAP
    (IoU=0.50:0.95, stats[0]).
    """

    def __init__(self, anno_json, classes, num_classes, min_score, nms_threshold, max_boxes):
        self.num_classes = num_classes
        self.classes = classes
        self.min_score = min_score
        self.nms_threshold = nms_threshold
        self.max_boxes = max_boxes

        # Model label index -> class name.
        self.val_cls_dict = dict(enumerate(classes))
        self.coco_gt = COCO(anno_json)
        categories = self.coco_gt.loadCats(self.coco_gt.getCatIds())
        # Class name -> COCO category id.
        self.class_dict = {category['name']: category['id'] for category in categories}

        self.predictions = []
        self.img_ids = []

    def update(self, batch):
        """Threshold, NMS, and record detections for one image.

        batch keys: 'boxes' (normalized [y1,x1,y2,x2]), 'box_scores'
        (per-class scores), 'img_id', 'image_shape' as (h, w).
        """
        pred_boxes = batch['boxes']
        box_scores = batch['box_scores']
        img_id = batch['img_id']
        h, w = batch['image_shape']

        kept_boxes = []
        kept_labels = []
        kept_scores = []
        self.img_ids.append(img_id)

        # Class 0 is background and is skipped.
        for cls_idx in range(1, self.num_classes):
            cls_scores = box_scores[:, cls_idx]
            mask = cls_scores > self.min_score
            cls_scores = cls_scores[mask]
            # Scale normalized boxes up to pixel coordinates.
            cls_boxes = pred_boxes[mask] * [h, w, h, w]

            if mask.any():
                keep = apply_nms(cls_boxes, cls_scores, self.nms_threshold, self.max_boxes)
                cls_boxes = cls_boxes[keep]
                cls_scores = cls_scores[keep]

                kept_boxes.extend(cls_boxes.tolist())
                kept_scores.extend(cls_scores.tolist())
                coco_cat = self.class_dict[self.val_cls_dict[cls_idx]]
                kept_labels.extend([coco_cat] * len(cls_scores))

        for loc, label, score in zip(kept_boxes, kept_labels, kept_scores):
            # COCO wants [x, y, width, height]; boxes arrive as [y1, x1, y2, x2].
            self.predictions.append({
                'image_id': img_id,
                'bbox': [loc[1], loc[0], loc[3] - loc[1], loc[2] - loc[0]],
                'score': score,
                'category_id': label,
            })

    def get_metrics(self):
        """Dump predictions to json and return COCO mAP over the seen images."""
        with open('predictions.json', 'w') as f:
            json.dump(self.predictions, f)

        coco_dt = self.coco_gt.loadRes('predictions.json')
        evaluator = COCOeval(self.coco_gt, coco_dt, iouType='bbox')
        evaluator.params.imgIds = self.img_ids
        evaluator.evaluate()
        evaluator.accumulate()
        evaluator.summarize()
        return evaluator.stats[0]


# Evaluation hyper-parameters used by COCOMetrics / apply_eval below.
num_classes = 2        # background + person (matches `classes` at file top)
max_boxes = 100        # max detections kept per class after NMS
nms_threshold = 0.6    # IoU threshold for NMS suppression
min_score = 0.1        # confidence cutoff applied before NMS


def apply_eval(eval_param_dict):
    """Run the network over the whole eval dataset and return COCO mAP.

    Args:
        eval_param_dict: dict with keys 'net' (inference cell), 'dataset'
            (MindSpore dataset yielding img_id/image/image_shape), and
            'anno_json' (path to COCO-format annotations).

    Returns:
        mAP (float) from COCOMetrics.get_metrics().
    """
    net = eval_param_dict['net']
    net.set_train(False)
    ds = eval_param_dict['dataset']
    anno_json = eval_param_dict['anno_json']
    coco_metrics = COCOMetrics(anno_json=anno_json, classes=classes, num_classes=num_classes, max_boxes=max_boxes,
                               nms_threshold=nms_threshold, min_score=min_score)
    for data in ds.create_dict_iterator(output_numpy=True, num_epochs=1):
        img_id = data['img_id']
        img_np = data['image']
        image_shape = data['image_shape']

        output = net(Tensor(img_np))

        # Unbatch the network output and feed the metric one image at a time.
        for batch_idx in range(img_np.shape[0]):
            pred_batch = {
                'boxes': output[0].asnumpy()[batch_idx],
                'box_scores': output[1].asnumpy()[batch_idx],
                'img_id': int(np.squeeze(img_id[batch_idx])),
                'image_shape': image_shape[batch_idx]
            }
            coco_metrics.update(pred_batch)
    # BUG FIX: the original computed and returned the metric INSIDE the batch
    # loop, so only the first batch was ever evaluated.
    return coco_metrics.get_metrics()


class SsdInferWithDecoder(nn.Cell):
    """Inference wrapper that decodes raw SSD offsets into corner boxes.

    Takes the wrapped network's (loc, label) output and converts loc from
    anchor-relative offsets to [y1, x1, y2, x2] corners clipped to [0, 1].
    """

    def __init__(self, network, default_boxes):
        super(SsdInferWithDecoder, self).__init__()
        self.network = network
        self.default_boxes = default_boxes
        # Variance-style scaling factors for center and size offsets.
        self.prior_scaling_xy = prior_scaling[0]
        self.prior_scaling_wh = prior_scaling[1]

    def construct(self, x):
        pred_loc, pred_label = self.network(x)

        anchor_xy = self.default_boxes[..., :2]
        anchor_wh = self.default_boxes[..., 2:]
        # Decode centers and sizes relative to the anchor boxes.
        center_xy = pred_loc[..., :2] * self.prior_scaling_xy * anchor_wh + anchor_xy
        box_wh = ops.Exp()(pred_loc[..., 2:] * self.prior_scaling_wh) * anchor_wh

        # Convert center/size to corner coordinates and clamp to the image.
        half_wh = box_wh / 2.0
        corners = ops.Concat(-1)((center_xy - half_wh, center_xy + half_wh))
        corners = ops.Maximum()(corners, 0)
        corners = ops.Minimum()(corners, 1)
        return corners, pred_label


# Checkpoint evaluated by the standalone eval below; defaults to the pretrained weights.
your_best_ckpt = pre_trained


def ssd_eval(dataset_path, ckpt_path, anno_json):
    """Build the SSD300 eval pipeline, load a checkpoint, and print mAP.

    Args:
        dataset_path: mindrecord file containing the eval split.
        ckpt_path: checkpoint file to evaluate.
        anno_json: COCO-format annotation json matching the eval split.
    """
    batch_size = 1
    ds = create_ssd_dataset(dataset_path, batch_size=batch_size, is_training=False, use_multiprocessing=False)

    if model_name == 'ssd300':
        net = SSD300(ssd_mobilenet_v2(), is_training=False)
    else:
        raise ValueError(f'model:{model_name} is not supported')
    net = SsdInferWithDecoder(net, Tensor(default_boxes))

    print('load checkpoint!')
    # BUG FIX: the original ignored the `ckpt_path` parameter and always
    # loaded the `your_best_ckpt` global.
    param_dict = ms.load_checkpoint(ckpt_path)
    net.init_parameters_data()
    ms.load_param_into_net(net, param_dict)

    net.set_train(False)
    total = ds.get_dataset_size() * batch_size
    print('\n============================\n')
    print('total images num:', total)  # typo fix: 'tatal' -> 'total'
    print('processing,please wait a moment.')

    eval_parm_dict = {'net': net, 'dataset': ds, 'anno_json': anno_json}
    mAP = apply_eval(eval_parm_dict)
    print(f'\nmAp:{mAP}')


voc_root = data_path
# BUG FIX: `voc_json` is already a full path (os.path.join(data_path,
# 'classes.json') at the top of the file); re-joining it onto voc_root
# produced a duplicated path like
# './dataset/pedestrian_detection/./dataset/pedestrian_detection/classes.json'.
json_path = voc_json
ms.context.set_context(mode=ms.GRAPH_MODE, device_target=device_target)

mindrecord_file = create_mindrecord('voc', 'ssd_eval.mindrecord', is_training=False)
print('start eval')
ssd_eval(mindrecord_file, your_best_ckpt, json_path)

import PIL.ImageDraw as ImageDraw
from PIL import Image
import matplotlib.pyplot as plt

# --- Qualitative demo: run a sample through the detector and draw one box ---
backbone = ssd_mobilenet_v2()
ssd = SSD300(backbone=backbone)
net = SsdInferWithDecoder(ssd, Tensor(default_boxes))
param_dict = ms.load_checkpoint(pre_trained)
# NOTE(review): parameters are initialized on `ssd` but the checkpoint is
# loaded into the wrapper `net` — confirm this pairing is intended.
ssd.init_parameters_data()
ms.load_param_into_net(net, param_dict)

mindrecord_file_1 = create_mindrecord('voc', prefix='ssd_test.mindrecord', is_training=False, is_testing=True)
eval_dataset = create_ssd_dataset(mindrecord_file_1, device_num=device_num, rank=rank, batch_size=1, is_testing=True,
                                  use_multiprocessing=False)
# NOTE(review): this loop walks the whole dataset and keeps only the LAST
# sample's image/box — presumably intentional for a single-sample demo.
for data in eval_dataset.create_dict_iterator():
    img = data['image']
    box = data['box']
pre_loc, prelabel = net(img)

default_bbox_xy = default_boxes[..., :2]
default_bbox_wh = default_boxes[..., 2:]
# NOTE(review): `net` is SsdInferWithDecoder, whose first output is already
# decoded corner coordinates (see construct above) — re-applying the anchor
# decoding here looks like double decoding; verify against the model output.
pred_xy = pre_loc[..., :2] * prior_scaling[0] * Tensor(default_bbox_wh) + Tensor(default_bbox_xy)
pred_wh = ops.Exp()(pre_loc[..., 2:] * prior_scaling[1]) * Tensor(default_bbox_wh)
# First anchor's decoded center (x, y) and size (w, h), normalized coordinates.
x = pred_xy[0][0][0]
y = pred_xy[0][0][1]
w = pred_wh[0][0][0]
h = pred_wh[0][0][1]

# NOTE(review): hard-coded image path — may not correspond to the sample
# actually fed through the network above.
img = Image.open('dataset/pedestrian_detection/test/Images/FLIR_10097.jpg')
draw = ImageDraw.Draw(img)
im_width, im_height = img.size
# NOTE(review): center->corner conversion is normally x - w/2, not (x - w)/2;
# combined with the +0.1/+0.3/+0.6 offsets below this looks hand-tuned — verify
# before reusing this drawing code elsewhere.
xmin = (x - w) / 2
ymin = (y - h) / 2
xmax = (x + w) / 2
ymax = (y + h) / 2
(left, right, top, bottom) = (
    (-xmin * 1) * im_width, ((xmax * 1) + 0.1) * im_width, ((-ymin * 1) + 0.3) * im_height,
    ((ymax * 1) + 0.6) * im_height)
draw.line([(left, top), (left, bottom), (right, bottom), (right, top), (left, top)], width=20, fill='Orange')
plt.imshow(img)
plt.show()
