import math
import os
from mindspore import context

# This machine does not support the Ascend device target.
# device_target_ = "Ascend"
from mindspore.nn import SiLU

# Run everything on CPU in graph mode (Ascend unavailable on this machine).
device_target_ = "CPU"
context.set_context(mode=context.GRAPH_MODE, device_target=device_target_, save_graphs=False)
print('success 当前已经设置为{}'.format(device_target_))

import requests
import time
import zipfile


def download_and_unzip(url, path):
    """Download a zip archive from ``url`` into ``path`` and extract it there.

    Args:
        url (str): remote location of the archive (HTTP basic auth is used).
        path (str): local directory that receives both the archive and its
            extracted contents; created if missing.

    Raises:
        RuntimeError: when the HTTP response status is not 200.
    """
    os.makedirs(path, exist_ok=True)
    file_path = os.path.join(path, 'minivoc.zip')
    start = time.time()
    # SECURITY NOTE(review): credentials are hard-coded; move them into
    # environment variables or a config file before sharing this script.
    user, password = 'tyx_neu', 'Sportlab307'
    resp = requests.get(url, auth=(user, password), stream=True)
    chunk_size = 1024
    content_size = int(resp.headers['content-length'])
    if resp.status_code == 200:
        print('start download,[file size]:{size:.2f}MB'.format(size=content_size / chunk_size / 1024))
        size = 0
        with open(file_path, 'wb') as file:
            for data in resp.iter_content(chunk_size=chunk_size):
                file.write(data)
                size += len(data)
                print('\r' + '[下载进度]：%s%.2f%%' % (
                    '>' * int(size * 50 / content_size), float(size / content_size * 100)), end=' ')
        print('\n' + 'download completed!,times:%.2f秒' % (time.time() - start))
    else:
        # BUG FIX: the original caught only ValueError and then fell through
        # to unzip even when the HTTP request had failed.
        raise RuntimeError('download failed with status %d' % resp.status_code)
    # Extract the archive next to itself; ZipFile used as a context manager
    # so the handle is closed even if extraction fails.
    with zipfile.ZipFile(file_path) as zip_file:
        zip_file.extractall(path)
    print('successfully unzip download dataset from website')


# Mini-VOC dataset hosted on OpenI; downloaded into ./dataset and extracted in place.
url = 'https://openi.pcl.ac.cn/attachments/3d2cd2c6-3b55-4146-901e-4a48f6bbc636?type=0'
# download_and_unzip(url, os.path.join(os.getcwd(), './'))
download_and_unzip(url, os.path.join(os.getcwd(), 'dataset'))

import matplotlib.pyplot as plt
from PIL import Image

# Preview one sample: the raw image beside its label image.
img0 = Image.open('images/000104.jpg')
img1 = Image.open('images/000104_label.png')
plt.figure(figsize=(20, 20))
plt.subplot(1, 2, 1)
plt.title('Primary')
plt.imshow(img0)
# BUG FIX: the label must go into the SECOND subplot slot; the original
# reused (1, 2, 1) and overwrote the first image.
plt.subplot(1, 2, 2)
plt.title('Label')
plt.imshow(img1)
plt.show()

from pycocotools.coco import COCO
import random
import numpy as np
import os
import multiprocessing
import cv2
from PIL import Image
import mindspore.dataset as ds


def has_valid_annotation(anno, min_keypoints_per_image=10):
    """Return True when a COCO annotation list is usable for training.

    An image qualifies when it has at least one annotation, at least one
    non-degenerate bbox and - if keypoints are annotated - enough visible
    keypoints.

    Args:
        anno (list[dict]): COCO-style annotations for one image.
        min_keypoints_per_image (int): minimum visible keypoints required.
            BUG FIX: this was an undefined module global; it is now a
            backward-compatible parameter (10 is the value conventionally
            used by maskrcnn-benchmark-derived loaders - TODO confirm).
    """
    if not anno:
        return False
    if _has_only_empty_bbox(anno):
        return False
    # No keypoint field at all: bbox checks above are sufficient.
    if 'keypoints' not in anno[0]:
        return True
    return _count_visible_keypoints(anno) >= min_keypoints_per_image


def _has_only_empty_bbox(anno):
    return all(any(o <= 1 for o in obj['bbox'][2:]) for obj in anno)


def _count_visible_keypoints(anno):
    return sum(sum(1 for v in ann['keypoints'][2::3] if v > 0) for ann in anno)


class COCOYoloDataset:
    """COCO-format detection dataset for YOLOv5 training and evaluation.

    Training samples are returned as (image, boxes, input_size, mosaic_flag);
    eval samples as (PIL image, img_id). With probability 0.5 a training
    sample is built as a 2x2 mosaic of four images.
    """

    def __init__(self, root, ann_file, remove_images_without_annotations=True,
                 filter_crowd_anno=True, is_training=True):
        self.coco = COCO(ann_file)
        self.root = root
        self.img_ids = list(sorted(self.coco.imgs.keys()))
        self.filter_crowd_anno = filter_crowd_anno
        self.is_training = is_training
        self.mosaic = True

        if remove_images_without_annotations:
            # Keep only images with at least one usable annotation.
            img_ids = []
            for img_id in self.img_ids:
                ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=None)
                anno = self.coco.loadAnns(ann_ids)
                if has_valid_annotation(anno):
                    img_ids.append(img_id)
            self.img_ids = img_ids

        self.categories = {cat['id']: cat['name'] for cat in self.coco.cats.values()}

        # BUG FIX: the original built this dict from itself before it existed
        # (AttributeError). Map sparse COCO category ids to contiguous labels.
        self.cat_ids_to_continuous_ids = {
            v: i for i, v in enumerate(self.coco.getCatIds())
        }

        self.count = 0

    def _load_boxes(self, coco, img_id):
        """Return [[x, y, w, h, label], ...] for one image, honoring crowd filtering."""
        ann_ids = coco.getAnnIds(imgIds=img_id)
        annos = coco.loadAnns(ann_ids)
        if self.filter_crowd_anno:
            annos = [anno for anno in annos if anno['iscrowd'] == 0]
        out_target = []
        for anno in annos:
            # BUG FIX: __getitem__ previously read anno['box'] (KeyError);
            # the COCO field is 'bbox'.
            bbox = self._convetTopDown(anno['bbox'])
            label = self.cat_ids_to_continuous_ids[anno['category_id']]
            out_target.append(list(bbox) + [int(label)])
        return out_target

    def _mosaic_preprocess(self, index, input_size):
        """Assemble a 2x2 mosaic of four images (self + 3 random) with shifted boxes."""
        labels4 = []
        s = 384
        self.mosaic_border = [-s // 2, -s // 2]
        # Random mosaic center inside the 2s x 2s canvas.
        yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]
        indices = [index] + [random.randint(0, len(self.img_ids) - 1) for _ in range(3)]

        img4 = np.full((s * 2, s * 2, 3), 128, dtype=np.uint8)
        for i, img_ids_index in enumerate(indices):  # BUG FIX: missing ':' (syntax error)
            coco = self.coco
            img_id = self.img_ids[img_ids_index]
            img_path = coco.loadImgs(img_id)[0]['file_name']
            # BUG FIX: PIL images have no .shape; convert to ndarray first.
            img = np.array(Image.open(os.path.join(self.root, img_path)).convert('RGB'))
            h, w = img.shape[:2]

            if i == 0:  # top-left tile
                x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc
                x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h
            elif i == 1:  # top-right tile
                # BUG FIX: the right edge must be xc + w, not xc + 2.
                x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
                x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
            elif i == 2:  # bottom-left tile
                x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
                # BUG FIX: a stray trailing comma made the RHS a nested tuple.
                x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
            else:  # bottom-right tile
                x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
                x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

            img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]

            # Offset between the tile's canvas position and its crop origin.
            padw = x1a - x1b
            padh = y1a - y1b

            out_target = self._load_boxes(coco, img_id)
            if out_target:
                labels = np.array(out_target, dtype=np.float64)
                # BUG FIX: the original indexed a plain list ('lebels[:, 0]',
                # TypeError) and shifted only one coordinate consistently.
                labels[:, 0] = labels[:, 0] + padw
                labels[:, 1] = labels[:, 1] + padh
                labels[:, 2] = labels[:, 2] + padw
                labels[:, 3] = labels[:, 3] + padh
                labels4.append(labels)

        if labels4:
            labels4 = np.concatenate(labels4, 0)
            # Clamp coordinates to the mosaic canvas.
            np.clip(labels4[:, :4], 0, 2 * s, out=labels4[:, :4])
        flag = np.array([1], dtype=np.int32)
        return img4, labels4, input_size, flag

    def __getitem__(self, index):
        """Return one sample; layout depends on training/eval mode (see class doc)."""
        coco = self.coco
        img_id = self.img_ids[index]
        img_path = coco.loadImgs(img_id)[0]['file_name']
        if not self.is_training:
            img = Image.open(os.path.join(self.root, img_path)).convert('RGB')
            return img, img_id

        input_size = [640, 640]
        if self.mosaic and random.random() < 0.5:
            return self._mosaic_preprocess(index, input_size)
        # Raw file bytes; decoding is deferred to the transform pipeline.
        img = np.fromfile(os.path.join(self.root, img_path), dtype='int8')
        out_target = self._load_boxes(coco, img_id)
        flag = np.array([0], dtype=np.int32)
        return img, out_target, input_size, flag

    def __len__(self):
        return len(self.img_ids)

    def _convetTopDown(self, bbox):
        """Pass the COCO [x_min, y_min, w, h] box through unchanged."""
        x_min = bbox[0]
        y_min = bbox[1]
        w = bbox[2]
        h = bbox[3]
        return [x_min, y_min, w, h]


import mindspore.dataset as ds
from src.distributed_sampler import DistributedSampler
import multiprocessing
from src.transforms import reshape_fn, MultiScaleTrans, PreprocessTrueBox


def create_yolo_dataset(image_dir, anno_path, batch_size, device_num, rank, config=None, is_training=True,
                        shuffle=True):
    """Build the MindSpore data pipeline for YOLOv5 training or evaluation.

    Args:
        image_dir (str): root directory of the images.
        anno_path (str): COCO-format annotation json.
        batch_size (int): per-device batch size.
        device_num (int): number of devices (for the distributed sampler).
        rank (int): this device's rank.
        config: project config object; mutated (dataset_size) in place.
        is_training (bool): training pipeline (transforms + true-box prep)
            versus eval pipeline (reshape only).
        shuffle (bool): shuffle via the distributed sampler.

    Returns:
        ds.Dataset: the batched dataset.
    """
    cv2.setNumThreads(2)
    ds.config.set_enable_shared_mem(True)
    if is_training:
        filter_crowd = True
        remove_empty_anno = True
    else:
        filter_crowd = False
        remove_empty_anno = False

    yolo_dataset = COCOYoloDataset(root=image_dir, ann_file=anno_path, filter_crowd_anno=filter_crowd,
                                   remove_images_without_annotations=remove_empty_anno, is_training=is_training)
    distributed_sampler = DistributedSampler(len(yolo_dataset), device_num, rank, shuffle=shuffle)
    yolo_dataset.size = len(distributed_sampler)

    hwc_to_chw = ds.vision.HWC2CHW()
    config.dataset_size = len(yolo_dataset)
    cores = multiprocessing.cpu_count()
    num_parallel_workers = int(cores / device_num)
    # NOTE(review): the computed per-device worker count is immediately
    # overridden by the config value - confirm this is intentional.
    num_parallel_workers = config.num_parallel_workers
    if is_training:
        multi_scale_trans = MultiScaleTrans(config, device_num)
        yolo_dataset.transforms = multi_scale_trans

        data_column_names = ['image', 'annotation', 'input_size', 'mosaic_flag']
        output_column_names = ['image', 'annotation', 'bbox1', 'bbox2', 'bbox3',
                               'gt_box1', 'gt_box2', 'gt_box3']
        map1_out_column_names = ['image', 'annotation', 'size']
        map2_in_column_names = ['annotation', 'size']
        map2_out_column_names = ['annotation', 'bbox1', 'bbox2', 'bbox3',
                                 'gt_box1', 'gt_box2', 'gt_box3']
        dataset = ds.GeneratorDataset(yolo_dataset, column_names=data_column_names, sampler=distributed_sampler,
                                      python_multiprocessing=False, num_parallel_workers=1)
        # BUG FIX: 'out_columns' is not a valid map() keyword, and the first
        # map's column_order must match its own output columns.
        dataset = dataset.map(operations=multi_scale_trans, input_columns=data_column_names,
                              output_columns=map1_out_column_names, column_order=map1_out_column_names,
                              num_parallel_workers=1, python_multiprocessing=False)
        dataset = dataset.map(operations=PreprocessTrueBox(config), input_columns=map2_in_column_names,
                              output_columns=map2_out_column_names, column_order=output_column_names,
                              num_parallel_workers=1, python_multiprocessing=False)

        # ImageNet statistics scaled to [0, 255] pixel range.
        mean = [m * 255 for m in [0.485, 0.456, 0.406]]
        std = [s * 255 for s in [0.229, 0.224, 0.225]]

        # BUG FIX: typos 'Normalie' and 'mum_parallel_workers'.
        dataset = dataset.map(operations=[ds.vision.Normalize(mean, std), hwc_to_chw],
                              input_columns='image',
                              num_parallel_workers=min(4, num_parallel_workers))

        def concatenate(images):
            """Pack each 2x2 pixel block into channels (space-to-depth for Focus)."""
            images = np.concatenate((images[..., ::2, ::2], images[..., 1::2, ::2],
                                     images[..., ::2, 1::2], images[..., 1::2, 1::2]))
            return images

        dataset = dataset.map(operations=concatenate, input_columns='image',
                              num_parallel_workers=min(4, num_parallel_workers))
        dataset = dataset.batch(batch_size, num_parallel_workers=min(4, num_parallel_workers), drop_remainder=True)

    else:
        dataset = ds.GeneratorDataset(yolo_dataset, column_names=['image', "img_id"],
                                      sampler=distributed_sampler)
        compose_map_func = (lambda image, img_id: reshape_fn(image, img_id, config))
        dataset = dataset.map(operations=compose_map_func, input_columns=['image', 'img_id'],
                              output_columns=['image', 'image_shape', 'img_id'],
                              column_order=['image', 'image_shape', 'img_id'], num_parallel_workers=8)
        # BUG FIX: the keyword is 'input_columns', not 'input_column'.
        dataset = dataset.map(operations=hwc_to_chw, input_columns=['image'], num_parallel_workers=8)
        dataset = dataset.batch(batch_size, drop_remainder=True)
    return dataset


from src.config import config

# Point the config at the local Mini-VOC copy and build the training pipeline.
config.data_root = './minivoc/images'
config.data_path = './minivoc/train.json'
config.annFile = './minivoc/train.json'
# NOTE(review): this rebinds `ds`, shadowing the `mindspore.dataset as ds`
# module imported above; a distinct name (e.g. train_ds) would be clearer.
ds = create_yolo_dataset(image_dir=config.data_root, anno_path=config.annFile, is_training=True,
                         batch_size=config.per_batch_size, device_num=config.group_size,
                         rank=config.rank, config=config)

import mindspore.nn as nn
import mindspore.ops as ops


class Conv(nn.Cell):
    """Conv2d -> BatchNorm2d -> activation building block.

    `dilation`, `alpha` and the `pad_mode` argument are unused but kept so
    existing call sites remain valid.
    """

    def __init__(self, c1, c2, k=1, s=1, p=None,
                 dilation=1, alpha=0.1, momentum=0.97, eps=1e-3, pad_mode='same', act=True):
        super(Conv, self).__init__()
        # BUG FIX: auto_pad() was never defined anywhere in this file; the
        # conventional YOLO auto-padding for odd kernels is k // 2.
        self.padding = k // 2 if p is None else p
        # BUG FIX: pad_mode stayed None for any padding > 1, which nn.Conv2d
        # rejects; explicit padding always needs pad_mode='pad'.
        self.pad_mode = 'same' if self.padding == 0 else 'pad'
        self.conv = nn.Conv2d(
            c1, c2, k, s, padding=self.padding, pad_mode=self.pad_mode, has_bias=False
        )
        self.bn = nn.BatchNorm2d(c2, momentum=momentum, eps=eps)
        # act=True -> SiLU; an nn.Cell -> used as-is; anything else -> identity.
        self.act = SiLU() if act is True else (
            act if isinstance(act, nn.Cell) else ops.Identity()
        )

    def construct(self, x):
        """Apply convolution, batch norm and activation in sequence."""
        return self.act(self.bn(self.conv(x)))


class Bottleneck(nn.Cell):
    """Residual bottleneck: 1x1 reduce then 3x3 expand, with optional skip."""

    def __init__(self, c1, c2, shortcut=True, e=0.5):
        super(Bottleneck, self).__init__()
        hidden = int(c2 * e)
        self.conv1 = Conv(c1, hidden, 1, 1)
        self.conv2 = Conv(hidden, c2, 3, 1)
        # The residual add is only valid when input/output widths match.
        self.add = shortcut and c1 == c2

    def construct(self, x):
        y = self.conv2(self.conv1(x))
        if self.add:
            y = x + y
        return y


class BottlenectCSP(nn.Cell):
    """Cross Stage Partial block: a bottleneck stack beside a shortcut branch."""

    def __init__(self, c1, c2, n=1, shortcut=True, e=0.5):
        super(BottlenectCSP, self).__init__()
        c_ = int(c2 * e)
        self.conv1 = Conv(c1, c_, 1, 1)
        self.conv2 = Conv(c1, c_, 1, 1)
        self.conv3 = Conv(2 * c_, c2, 1)
        # BUG FIX: the stack must be plain Bottleneck cells; constructing
        # BottlenectCSP here recursed without bound (RecursionError).
        self.m = nn.SequentialCell(
            [Bottleneck(c_, c_, shortcut, e=1.0) for _ in range(n)]
        )
        # BUG FIX: ops.concat is the functional API; the instantiable
        # operator is ops.Concat.
        self.concat = ops.Concat(axis=1)

    def construct(self, x):
        """Run the bottleneck branch and the shortcut branch, fuse by concat."""
        c1 = self.conv1(x)
        c2 = self.m(c1)
        c3 = self.conv2(x)
        c4 = self.concat((c2, c3))
        c5 = self.conv3(c4)
        return c5


class SPP(nn.Cell):
    """Spatial Pyramid Pooling with fixed 5/9/13 same-size max-pool branches."""

    def __init__(self, c1, c2, k=(5, 9, 13)):
        # CONSISTENCY FIX: the default was (5, 6, 13) although the pools
        # below are hard-wired to 5/9/13 and the call site passes [5, 9, 13];
        # k only determines conv2's input width via len(k).
        super(SPP, self).__init__()
        c_ = c1 // 2
        self.conv1 = Conv(c1, c_, 1, 1)
        self.conv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)

        self.maxpool1 = nn.MaxPool2d(kernel_size=5, stride=1, pad_mode='same')
        self.maxpool2 = nn.MaxPool2d(kernel_size=9, stride=1, pad_mode='same')
        self.maxpool3 = nn.MaxPool2d(kernel_size=13, stride=1, pad_mode='same')
        self.concat = ops.Concat(axis=1)

    def construct(self, x):
        """Concatenate the identity branch with three pooled views, then project."""
        c1 = self.conv1(x)
        m1 = self.maxpool1(c1)
        m2 = self.maxpool2(c1)
        m3 = self.maxpool3(c1)
        c4 = self.concat((c1, m1, m2, m3))
        c5 = self.conv2(c4)
        return c5


class Focus(nn.Cell):
    """Projection applied after the dataset-side 2x2 space-to-depth packing."""

    def __init__(self, c1, c2, k=1, s=1, p=None, act=True):
        super(Focus, self).__init__()
        # BUG FIX: `act` was passed positionally and landed in Conv's
        # `dilation` parameter slot; pass it by keyword.
        self.conv = Conv(c1 * 4, c2, k, s, p, act=act)

    def construct(self, x):
        # The pixel-to-channel packing happens in the data pipeline
        # (see create_yolo_dataset), so only the convolution remains here.
        c1 = self.conv(x)
        return c1


class SiLU(nn.Cell):
    """Sigmoid Linear Unit activation: x * sigmoid(x)."""

    def __init__(self):
        super(SiLU, self).__init__()
        self.sigmoid = ops.Sigmoid()

    def construct(self, x):
        gate = self.sigmoid(x)
        return x * gate


class YOLOv5Backbone(nn.Cell):
    """CSP-style backbone that yields three feature maps of increasing depth.

    `shape` packs the channel plan: [in_ch, ch1, ch2, ch3, ch4, ch5,
    depth_multiplier]; the last entry scales the bottleneck counts.
    """

    def __init__(self, shape):
        super(YOLOv5Backbone, self).__init__()
        self.focus = Focus(shape[0], shape[1], k=3, s=1)
        self.conv1 = Conv(shape[1], shape[2], k=3, s=2)
        self.CSP1 = BottlenectCSP(shape[2], shape[2], n=1 * shape[6])
        self.conv2 = Conv(shape[2], shape[3], k=3, s=2)
        self.CSP2 = BottlenectCSP(shape[3], shape[3], n=3 * shape[6])
        self.conv3 = Conv(shape[3], shape[4], k=3, s=2)
        self.CSP3 = BottlenectCSP(shape[4], shape[4], n=3 * shape[6])
        self.conv4 = Conv(shape[4], shape[5], k=3, s=2)
        self.spp = SPP(shape[5], shape[5], k=[5, 9, 13])
        self.CSP4 = BottlenectCSP(shape[5], shape[5], n=1 * shape[6], shortcut=False)

    def construct(self, x):
        """Return the shallow, middle and deep feature maps for the neck."""
        stem = self.CSP1(self.conv1(self.focus(x)))
        feat_shallow = self.CSP2(self.conv2(stem))
        feat_middle = self.CSP3(self.conv3(feat_shallow))
        feat_deep = self.CSP4(self.spp(self.conv4(feat_middle)))
        return feat_shallow, feat_middle, feat_deep


class YoloBlock(nn.Cell):
    """Final 1x1 projection mapping neck features to raw head predictions."""

    def __init__(self, in_channels, out_channels):
        super(YoloBlock, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, has_bias=True)

    def construct(self, x):
        return self.conv(x)


class YOLO(nn.Cell):
    """YOLOv5 neck + heads: top-down then bottom-up feature fusion (PANet-style)."""

    def __init__(self, backbone, shape):
        super(YOLO, self).__init__()
        self.backbone = backbone
        self.config = default_config
        # BUG FIX: each head predicts 3 anchors x (num_classes + 5) values;
        # the original reused config.out_channel here, which is wrong and
        # also compounds every time a YOLO instance is built.
        self.config.out_channel = (self.config.num_classes + 5) * 3

        self.conv1 = Conv(shape[5], shape[4], k=1, s=1)
        self.CSP5 = BottlenectCSP(shape[5], shape[4], n=1 * shape[6], shortcut=False)
        self.conv2 = Conv(shape[4], shape[3], k=1, s=1)
        self.CSP6 = BottlenectCSP(shape[4], shape[3], n=1 * shape[6], shortcut=False)
        self.conv3 = Conv(shape[3], shape[3], k=3, s=2)
        self.CSP7 = BottlenectCSP(shape[4], shape[4], n=1 * shape[6], shortcut=False)
        self.conv4 = Conv(shape[4], shape[4], k=3, s=2)
        self.CSP8 = BottlenectCSP(shape[5], shape[5], n=1 * shape[6], shortcut=False)
        self.back_block1 = YoloBlock(shape[3], self.config.out_channel)
        self.back_block2 = YoloBlock(shape[4], self.config.out_channel)
        self.back_block3 = YoloBlock(shape[5], self.config.out_channel)

        # BUG FIX: ops.concat is the functional API; the operator is ops.Concat.
        self.concat = ops.Concat(axis=1)

    def construct(self, x):
        """Fuse backbone features and emit small/medium/big-object head outputs."""
        # x is the space-to-depth packed input, so the real image size is 2x.
        img_height = x.shape[2] * 2
        img_width = x.shape[3] * 2

        feature_map1, feature_map2, feature_map3 = self.backbone(x)
        # Top-down path: upsample deep features and merge with shallower maps.
        c1 = self.conv1(feature_map3)
        ups1 = ops.ResizeNearestNeighbor((img_height // 16, img_width // 16))(c1)
        c2 = self.concat((ups1, feature_map2))
        c3 = self.CSP5(c2)
        c4 = self.conv2(c3)
        ups2 = ops.ResizeNearestNeighbor((img_height // 8, img_width // 8))(c4)
        c5 = self.concat((ups2, feature_map1))

        c6 = self.CSP6(c5)
        # Bottom-up path: stride-2 convs re-merge with the lateral tensors.
        c7 = self.conv3(c6)
        c8 = self.concat((c7, c4))

        c9 = self.CSP7(c8)
        c10 = self.conv4(c9)
        c11 = self.concat((c10, c1))

        c12 = self.CSP8(c11)
        small_object_output = self.back_block1(c6)
        medium_object_output = self.back_block2(c9)
        big_object_output = self.back_block3(c12)
        return small_object_output, medium_object_output, big_object_output


from src.config import config as default_config
import mindspore as ms


class DetectionBlock(nn.Cell):
    """Decode one YOLOv5 head output into box / confidence / class tensors.

    Args:
        scale (str): 's', 'm' or 'l' - selects the anchor triplet and the
            xy scaling constants for this head.
        config: project config providing anchor_scales and num_classes.
        is_training (bool): in training mode return raw prediction plus
            decoded xy/wh for the loss; otherwise the fully decoded boxes.
    """

    def __init__(self, scale, config=default_config, is_training=True):
        # BUG FIX: nn.Cell.__init__ was never called, so operator/parameter
        # registration would fail at construction time.
        super(DetectionBlock, self).__init__()
        self.config = config
        if scale == 's':
            idx = (0, 1, 2)
            self.scale_x_y = 1.2
            self.offset_x_y = 0.1
        elif scale == 'm':
            idx = (3, 4, 5)
            self.scale_x_y = 1.1
            self.offset_x_y = 0.05
        elif scale == 'l':
            idx = (6, 7, 8)
            self.scale_x_y = 1.05
            self.offset_x_y = 0.025
        else:
            raise KeyError('invalid scale value for detectionBlock')

        self.anchors = ms.Tensor([self.config.anchor_scales[i] for i in idx], ms.float32)
        self.num_anchors_per_scale = 3
        # Per-anchor attributes: 4 box coords + 1 objectness + class scores.
        self.num_attrib = 4 + 1 + self.config.num_classes
        self.lambda_coord = 1

        self.sigmoid = nn.Sigmoid()
        self.reshape = ops.Reshape()
        self.tile = ops.Tile()
        self.concat = ops.Concat(axis=-1)
        self.pow = ops.Pow()
        self.transpose = ops.Transpose()
        self.exp = ops.Exp()
        self.conf_training = is_training

    def construct(self, x, input_shape):
        """Decode the raw head tensor; see class docstring for the two modes."""
        num_batch = x.shape[0]
        grid_size = x.shape[2:4]

        # (N, C, H, W) -> (N, H, W, anchors, attribs)
        prediction = self.reshape(x, (num_batch, self.num_anchors_per_scale, self.num_attrib,
                                      grid_size[0], grid_size[1]))
        prediction = self.transpose(prediction, (0, 3, 4, 1, 2))

        # Build the per-cell (x, y) grid offsets.
        grid_x = ms.numpy.arange(grid_size[1])
        grid_y = ms.numpy.arange(grid_size[0])
        grid_x = self.tile(self.reshape(grid_x, (1, 1, -1, 1, 1)), (1, grid_size[0], 1, 1, 1))
        grid_y = self.tile(self.reshape(grid_y, (1, -1, 1, 1, 1)), (1, 1, grid_size[1], 1, 1))
        grid = self.concat((grid_x, grid_y))

        # BUG FIX: prediction is 5-D here; the xy/wh slices were written with
        # only four indices and grabbed the wrong axis.
        box_xy = prediction[:, :, :, :, :2]
        box_wh = prediction[:, :, :, :, 2:4]
        box_confidence = prediction[:, :, :, :, 4:5]
        box_probs = prediction[:, :, :, :, 5:]

        # BUG FIX: this expression was split across two lines without a
        # continuation, which is a syntax error.
        grid_wh = ops.cast(ops.tuple_to_array((grid_size[1], grid_size[0])), ms.float32)
        box_xy = (self.scale_x_y * self.sigmoid(box_xy) - self.offset_x_y + grid) / grid_wh

        box_wh = self.exp(box_wh) * self.anchors / input_shape

        box_confidence = self.sigmoid(box_confidence)
        box_probs = self.sigmoid(box_probs)

        if self.conf_training:
            return prediction, box_xy, box_wh
        return self.concat((box_xy, box_wh, box_confidence, box_probs))


class YOLOV5(nn.Cell):
    """Full YOLOv5 detector: backbone + neck + three per-scale decode heads."""

    def __init__(self, is_training, version=0):
        super(YOLOV5, self).__init__()
        self.config = default_config

        # version indexes the s/m/l/x channel plans in config.input_shape.
        self.shape = self.config.input_shape[version]
        self.feature_map = YOLO(backbone=YOLOv5Backbone(shape=self.shape), shape=self.shape)

        # BUG FIX: DetectionBlock only accepts 's'/'m'/'l'; the numeric
        # strings '1'/'2'/'3' raised KeyError. Large anchors decode the big
        # feature map, small anchors the small one.
        self.detect_1 = DetectionBlock('l', is_training=is_training)
        self.detect_2 = DetectionBlock('m', is_training=is_training)
        self.detect_3 = DetectionBlock('s', is_training=is_training)

    def construct(self, x, input_shape):
        """Run the network and decode each of the three head outputs."""
        small_object_output, medium_object_output, big_object_output = self.feature_map(x)
        output_big = self.detect_1(big_object_output, input_shape)
        output_me = self.detect_2(medium_object_output, input_shape)
        output_small = self.detect_3(small_object_output, input_shape)

        return output_big, output_me, output_small


from src.loss import ConfidenceLoss, ClassLoss


class Iou(nn.Cell):
    """Elementwise IoU between center-format (x, y, w, h) box tensors."""

    def __init__(self):
        super(Iou, self).__init__()
        self.min = ops.Minimum()
        self.max = ops.Maximum()
        self.squeeze = ops.Squeeze()

    def construct(self, box1, box2):
        """Return IoU over the last axis; inputs are 6-D broadcastable tensors."""
        box1_xy = box1[:, :, :, :, :, :2]
        box1_wh = box1[:, :, :, :, :, 2:4]
        box1_mins = box1_xy - box1_wh / ops.scalar_to_array(2.0)
        box1_maxs = box1_xy + box1_wh / ops.scalar_to_array(2.0)

        box2_xy = box2[:, :, :, :, :, :2]
        box2_wh = box2[:, :, :, :, :, 2:4]
        box2_mins = box2_xy - box2_wh / ops.scalar_to_array(2.0)
        # BUG FIX: the max corner is center PLUS half extent (was '-').
        box2_maxs = box2_xy + box2_wh / ops.scalar_to_array(2.0)

        intersect_mins = self.max(box1_mins, box2_mins)
        intersect_maxs = self.min(box1_maxs, box2_maxs)
        # Clamp at zero so disjoint boxes contribute no area.
        intersect_wh = self.max(intersect_maxs - intersect_mins, ops.scalar_to_array(0.0))

        # BUG FIX: the slice was written as a float index (0.1); it must be 0:1.
        intersect_area = self.squeeze(intersect_wh[:, :, :, :, :, 0:1]) * self.squeeze(
            intersect_wh[:, :, :, :, :, 1:2]
        )
        box1_area = self.squeeze(box1_wh[:, :, :, :, :, 0:1]) * self.squeeze(
            box1_wh[:, :, :, :, :, 1:2]
        )
        box2_area = self.squeeze(box2_wh[:, :, :, :, :, 0:1]) * self.squeeze(
            box2_wh[:, :, :, :, :, 1:2]
        )
        iou = intersect_area / (box1_area + box2_area - intersect_area)
        return iou


class GIOU(nn.Cell):
    """Generalized IoU between corner-format (x1, y1, x2, y2) box tensors."""

    def __init__(self):
        super(GIOU, self).__init__()
        self.reshape = ops.Reshape()
        self.min = ops.Minimum()
        self.max = ops.Maximum()
        # BUG FIX: ops.concat is the functional API and cannot be called with
        # only axis; the instantiable operator is ops.Concat.
        self.concat = ops.Concat(axis=1)
        self.mean = ops.ReduceMean()
        self.div = ops.RealDiv()
        # Guards against division by zero on degenerate boxes.
        self.eps = 0.000001

    def construct(self, box_p, box_gt):
        """GIoU = IoU - (enclosing_area - union) / enclosing_area, clipped to [-1, 1]."""
        box_p_area = (box_p[..., 2:3] - box_p[..., 0:1]) * (box_p[..., 3:4] - box_p[..., 1:2])
        box_gt_area = (box_gt[..., 2:3] - box_gt[..., 0:1]) * (box_gt[..., 3:4] - box_gt[..., 1:2])
        # Intersection rectangle corners.
        x_1 = self.max(box_p[..., 0:1], box_gt[..., 0:1])
        x_2 = self.min(box_p[..., 2:3], box_gt[..., 2:3])
        y_1 = self.max(box_p[..., 1:2], box_gt[..., 1:2])
        y_2 = self.min(box_p[..., 3:4], box_gt[..., 3:4])
        intersection = (y_2 - y_1) * (x_2 - x_1)
        # Smallest rectangle enclosing both boxes.
        xc_1 = self.min(box_p[..., 0:1], box_gt[..., 0:1])
        xc_2 = self.max(box_p[..., 2:3], box_gt[..., 2:3])
        yc_1 = self.min(box_p[..., 1:2], box_gt[..., 1:2])
        yc_2 = self.max(box_p[..., 3:4], box_gt[..., 3:4])
        c_area = (xc_2 - xc_1) * (yc_2 - yc_1)
        union = box_p_area + box_gt_area - intersection
        union = union + self.eps
        c_area = c_area + self.eps
        iou = self.div(ops.cast(intersection, ms.float32), ops.cast(union, ms.float32))
        res_mid0 = c_area - union
        res_mid1 = self.div(ops.cast(res_mid0, ms.float32), ops.cast(c_area, ms.float32))
        giou = iou - res_mid1
        giou = ops.clip_by_value(giou, -1.0, 1.0)
        return giou


def xywh2x1y1x2y2(box_xywh):
    """Convert boxes from (cx, cy, w, h) to (x1, y1, x2, y2) along the last axis."""
    boxes_x1 = box_xywh[..., 0:1] - box_xywh[..., 2:3] / 2
    boxes_y1 = box_xywh[..., 1:2] - box_xywh[..., 3:4] / 2
    boxes_x2 = box_xywh[..., 0:1] + box_xywh[..., 2:3] / 2
    boxes_y2 = box_xywh[..., 1:2] + box_xywh[..., 3:4] / 2
    # BUG FIX: ops.concat is the functional form and is not constructed with
    # an axis; the callable operator is ops.Concat.
    boxes_x1y1x2y2 = ops.Concat(-1)((boxes_x1, boxes_y1, boxes_x2, boxes_y2))
    return boxes_x1y1x2y2


class YoloLossBlock(nn.Cell):
    """Per-scale YOLOv5 loss: GIoU box term + confidence term + class term.

    Args:
        scale (str): 's'/'m'/'l', selecting the anchor triplet for this head.
        config: project config providing anchor_scales and ignore_threshold.
    """

    def __init__(self, scale, config=default_config):
        super(YoloLossBlock, self).__init__()
        self.config = config
        if scale == 's':
            idx = (0, 1, 2)
        elif scale == 'm':
            idx = (3, 4, 5)
        elif scale == 'l':
            idx = (6, 7, 8)
        else:
            raise KeyError('Invalid scale value for DetectionBlock')
        self.anchors = ms.Tensor([self.config.anchor_scales[i] for i in idx], ms.float32)
        self.ignore_threshold = ms.Tensor(self.config.ignore_threshold, ms.float32)
        self.concat = ops.Concat(axis=-1)
        self.iou = Iou()
        self.reduce_max = ops.ReduceMax(keep_dims=False)
        self.confidence_loss = ConfidenceLoss()
        # BUG FIX: this line previously overwrote self.confidence_loss and
        # left self.class_loss (used in construct) undefined.
        self.class_loss = ClassLoss()

        self.reduce_sum = ops.ReduceSum()
        self.select = ops.Select()
        self.equal = ops.Equal()
        self.reshape = ops.Reshape()
        self.expand_dims = ops.ExpandDims()
        self.ones_like = ops.OnesLike()
        self.log = ops.Log()
        self.tuple_to_array = ops.TupleToArray()
        self.g_iou = GIOU()

    def construct(self, prediction, pred_xy, pred_wh, y_true, gt_box, input_shape):
        """Compute the scalar loss for this scale, averaged over the batch."""
        object_mask = y_true[:, :, :, :, 4:5]
        class_probs = y_true[:, :, :, :, 5:]
        true_boxes = y_true[:, :, :, :, :4]

        grid_shape = prediction.shape[1:3]
        grid_shape = ops.cast(self.tuple_to_array(grid_shape[::-1]), ms.float32)

        pred_boxes = self.concat((pred_xy, pred_wh))
        true_wh = y_true[:, :, :, :, 2:4]
        # Replace zero extents with ones so the log below never sees log(0).
        true_wh = self.select(self.equal(true_wh, 0.0), self.ones_like(true_wh),
                              true_wh)
        true_wh = self.log(true_wh / self.anchors * input_shape)
        # Weight small boxes more heavily than large ones.
        box_loss_scale = 2 - y_true[:, :, :, :, 2:3] * y_true[:, :, :, :, 3:4]

        gt_shape = gt_box.shape
        gt_box = self.reshape(gt_box, (gt_shape[0], 1, 1, 1, gt_shape[1], gt_shape[2]))

        # Best overlap of each prediction against any ground-truth box.
        iou = self.iou(self.expand_dims(pred_boxes, -2), gt_box)
        best_iou = self.reduce_max(iou, -1)

        # Cells already matching a ground truth well are excluded from the
        # negative confidence loss; gradients must not flow through the mask.
        ignore_mask = best_iou < self.ignore_threshold
        ignore_mask = ops.cast(ignore_mask, ms.float32)
        ignore_mask = self.expand_dims(ignore_mask, -1)

        ignore_mask = ops.stop_gradient(ignore_mask)

        confidence_loss = self.confidence_loss(object_mask, prediction[:, :, :, :, 4:5], ignore_mask)
        class_loss = self.class_loss(object_mask, prediction[:, :, :, :, 5:], class_probs)

        # BUG FIX: the reshape targets were malformed - (-1 - 1) evaluates to
        # the integer -2 and (-1, -1) is an invalid shape; both must flatten
        # to column vectors (-1, 1).
        object_mask_me = self.reshape(object_mask, (-1, 1))
        box_loss_scale_me = self.reshape(box_loss_scale, (-1, 1))
        pred_boxes_me = xywh2x1y1x2y2(pred_boxes)
        pred_boxes_me = self.reshape(pred_boxes_me, (-1, 4))
        true_boxes_me = xywh2x1y1x2y2(true_boxes)
        true_boxes_me = self.reshape(true_boxes_me, (-1, 4))
        c_iou = self.g_iou(pred_boxes_me, true_boxes_me)
        c_iou_loss = object_mask_me * box_loss_scale_me * (1 - c_iou)
        c_iou_loss_me = self.reduce_sum(c_iou_loss, ())
        loss = c_iou_loss_me * 4 + confidence_loss + class_loss
        batch_size = prediction.shape[0]
        return loss / batch_size


class YoloWithLossCell(nn.Cell):
    """Wrap the YOLOv5 network with its three per-scale loss blocks."""

    def __init__(self, network):
        super(YoloWithLossCell, self).__init__()
        self.yolo_network = network
        self.config = default_config
        self.loss_big = YoloLossBlock('l', self.config)
        self.loss_me = YoloLossBlock('m', self.config)
        self.loss_small = YoloLossBlock('s', self.config)
        self.tuple_to_tensor = ops.TupleToArray()

    def construct(self, x, y_true_0, y_true_1, y_true_2, gt_0, gt_1, gt_2, input_shape):
        # The incoming input_shape argument is discarded; the effective input
        # size is twice the packed tensor's spatial dims (space-to-depth).
        input_shape = x.shape[2:4]
        input_shape = ops.cast(self.tuple_to_tensor(input_shape) * 2, ms.float32)

        big_out, mid_out, small_out = self.yolo_network(x, input_shape)
        loss_l = self.loss_big(*big_out, y_true_0, gt_0, input_shape)
        loss_m = self.loss_me(*mid_out, y_true_1, gt_1, input_shape)
        loss_s = self.loss_small(*small_out, y_true_2, gt_2, input_shape)
        return loss_l + loss_m + loss_s * 0.2


import msilib
from src.logger import get_logger
from src.initializer import default_recursive_init


class AverageMeter:
    """Track a metric's latest value, running sum/count and average.

    Optionally mirrors each update to a TensorBoard-style writer via
    ``add_scalar(name, value, step)``.
    """

    def __init__(self, name, fmt=':f', tb_writer=None):
        self.name = name
        self.fmt = fmt
        self.tb_writer = tb_writer
        self.cur_step = 1
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def reset(self):
        """Zero out all running statistics (step counter is kept)."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        # BUG FIX: the running total is self.sum; 'self.num' never existed
        # and raised AttributeError on the first update.
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
        if self.tb_writer is not None:
            self.tb_writer.add_scalar(self.name, self.val, self.cur_step)
        self.cur_step += 1

    def __str__(self):
        fmtstr = '{name}:{avg' + self.fmt + '}'
        return fmtstr.format(**self.__dict__)


def get_param_groups(network):
    """Split trainable parameters into no-weight-decay and weight-decay groups.

    Biases and BatchNorm gamma/beta parameters are exempted from weight
    decay; everything else decays.
    """
    no_decay_suffixes = ('.bias', '.gamma', '.beta')
    no_decay_params = []
    decay_params = []
    for param in network.trainable_params():
        if param.name.endswith(no_decay_suffixes):
            no_decay_params.append(param)
        else:
            decay_params.append(param)
    return [{'params': no_decay_params, 'weight_decay': 0.0}, {'params': decay_params}]


def cpu_affinity(rank_id, device_num):
    """Pin this process to a contiguous slice of CPU cores for its rank.

    No-op when there are fewer cores than devices.
    """
    import psutil
    total_cores = psutil.cpu_count()
    if total_cores < device_num:
        return
    process = psutil.Process()
    per_rank = total_cores // device_num
    rank_id = rank_id % device_num

    used_cpu_list = list(range(rank_id * per_rank, (rank_id + 1) * per_rank))
    process.cpu_affinity(used_cpu_list)
    print(f'==={rank_id}/{device_num}===bind cpu:{used_cpu_list}')


def load_parameters(network, filename):
    """Restore model weights from a checkpoint file into ``network``.

    Optimizer moment parameters are skipped and the 'yolo_network.' prefix
    (added by the training wrapper cell) is stripped.
    """
    checkpoint = ms.load_checkpoint(filename)
    renamed = {}
    for name, value in checkpoint.items():
        if name.startswith('moments.'):
            continue  # optimizer state, not model weights
        key = name[13:] if name.startswith('yolo_network.') else name
        renamed[key] = value
    ms.load_param_into_net(network, renamed)


def get_device_id():
    """Return the device id from the DEVICE_ID environment variable (default 0)."""
    return int(os.environ.get('DEVICE_ID', '0'))


def train_preprocess():
    """Finalize the training config: LR schedule, dataset paths, MindSpore
    context, optional distributed init / CPU binding, and the logger."""
    if config.lr_scheduler == 'cosine_annealing' and config.max_epoch > config.T_max:
        config.T_max = config.max_epoch
    config.lr_epochs = [int(e) for e in str(config.lr_epochs).split(',')]
    config.data_root = './minivoc/images'
    config.annFile = './minivoc/val.json'
    config.device_target = device_target_
    ms.set_context(mode=ms.GRAPH_MODE, device_target=config.device_target,
                   device_id=get_device_id())

    if config.is_distributed:
        init_distribute()

    if config.device_target == 'GPU' and config.bind_cpu:
        cpu_affinity(config.rank, min(config.group_size, config.device_num))

    config.logger = get_logger(config.output_dir, config.rank)
    config.logger.save_args(config)


# ---- build the network, load pretrained weights, create the training dataset ----
train_preprocess()
dict_version = {'yolov5s': 0, 'yolov5m': 1, 'yolov5l': 2, 'yolov5x': 3}
network = YOLOV5(is_training=True, version=dict_version[config.yolov5_version])
default_recursive_init(network)

import mindspore.dataset as ds

pretrained_file = 'yolov5s_ascend_v180_coco2017_official_cv_mAP37.1.ckpt'
if os.path.isfile(pretrained_file):
    load_parameters(network, pretrained_file)
    print('have load pretrained_file')
network = YoloWithLossCell(network)
# was: `ds` was the `mindspore.dataset` MODULE, which has no get_dataset_size(),
# and no dataset was ever created. Build the training dataset and rebind `ds`
# to it; the training loop below iterates this object.
# NOTE(review): create_yolo_dataset is called later in this file as well —
# confirm it is imported at module level.
ds = create_yolo_dataset(config.data_root, config.annFile, is_training=True,
                         batch_size=config.per_batch_size, device_num=config.group_size,
                         rank=config.rank, shuffle=True, config=config)
steps_per_epoch = ds.get_dataset_size()


def linear_warmup_lr(current_step, warmup_steps, base_lr, init_lr):
    """Linearly interpolate from ``init_lr`` toward ``base_lr`` over the warmup."""
    init_lr = float(init_lr)
    slope = (float(base_lr) - init_lr) / float(warmup_steps)
    return init_lr + slope * current_step


def warmup_cosine_annealing_lr(lr, steps_per_epoch, warmup_epochs, max_epoch, T_max, eta_min=0):
    """Per-step LR schedule: linear warmup, then per-epoch cosine annealing.

    Returns a float32 numpy array with one entry per training step.
    """
    total_steps = int(max_epoch * steps_per_epoch)
    warmup_steps = int(warmup_epochs * steps_per_epoch)
    base_lr = lr
    warmup_init_lr = 0

    schedule = []
    for step in range(total_steps):
        if step < warmup_steps:
            value = linear_warmup_lr(step + 1, warmup_steps, base_lr, warmup_init_lr)
        else:
            epoch = step // steps_per_epoch
            value = eta_min + (base_lr - eta_min) * (1. + math.cos(math.pi * epoch / T_max)) / 2
        schedule.append(value)

    return np.array(schedule).astype(np.float32)


def get_lr(args, steps_per_epoch):
    """Build the per-step learning-rate array for the configured scheduler.

    Only 'cosine_annealing' is implemented; anything else raises.
    """
    if args.lr_scheduler != 'cosine_annealing':
        raise NotImplementedError(args.lr_scheduler)
    return warmup_cosine_annealing_lr(args.lr, steps_per_epoch, args.warmup_epochs,
                                      args.max_epoch, args.T_max, args.eta_min)


# ---- optimizer, train-step wrapper, and the training loop ----
lr = get_lr(config, steps_per_epoch)
opt = nn.Momentum(params=get_param_groups(network), momentum=config.momentum, learning_rate=ms.Tensor(lr),
                  weight_decay=config.weight_decay, loss_scale=config.loss_scale)
network = nn.TrainOneStepCell(network, opt, config.loss_scale // 2)
network.set_train()

# NOTE(review): `ds` must be a dataset instance here, not the mindspore.dataset
# module — confirm the dataset object is bound to `ds` before this point.
data_loader = ds.create_tuple_iterator(do_copy=False)
first_step = True
t_end = time.time()

loss_meter = AverageMeter('loss')
for epoch_idx in range(config.max_epoch):
    for step_idx, data in enumerate(data_loader):
        images = data[0]
        input_shape = images.shape[2:4]
        # (w, h) order; was ms.float64 — use float32 to match the dtype fed to
        # the network everywhere else in this file.
        input_shape = ms.Tensor(tuple(input_shape[::-1]), ms.float32)
        loss = network(images, data[2], data[3], data[4], data[5], data[6], data[7], input_shape)
        loss_meter.update(loss.asnumpy())

        cur_step = epoch_idx * steps_per_epoch + step_idx
        if cur_step % config.log_interval == 0:
            time_used = time.time() - t_end
            if first_step:
                # first measurement covers a single step (includes graph compile)
                fps = config.per_batch_size * config.group_size / time_used
                per_step_time = time_used * 1000
                first_step = False
            else:
                fps = config.per_batch_size * config.log_interval * config.group_size / time_used
                per_step_time = time_used / config.log_interval * 1000
            # was: the format string had no slot for the loss and fed the
            # AverageMeter into '{:.2f}', which raises TypeError; also the lr
            # was indexed by the in-epoch step instead of the global step.
            config.logger.info('epoch[{}], iter[{}], loss:{}, fps:{:.2f} imgs/sec, lr:{}, per step time:{}ms'.format(
                epoch_idx + 1, step_idx + 1, loss_meter, fps, lr[cur_step], per_step_time
            ))

            t_end = time.time()
            loss_meter.reset()

    # was: the checkpoint was written inside the step loop (once per batch);
    # save once at the end of each epoch instead.
    if config.rank == 0:
        ckpt_name = os.path.join(config.output_dir, 'yolov5_{}_{}.ckpt'.format(
            epoch_idx + 1, steps_per_epoch
        ))
        ms.save_checkpoint(network, ckpt_name)
        print('{} has been saved in {}'.format(ckpt_name, config.output_dir))

import sys
from collections import defaultdict
import datetime
from pycocotools.cocoeval import COCOeval


class Redirct:
    """Minimal stdout stand-in that accumulates everything written to it."""

    def __init__(self):
        self.content = ''

    def write(self, content):
        # file-like write(): append verbatim, no newline handling
        self.content = self.content + content

    def flush(self):
        # discard the buffer, mirroring a flush-to-nowhere sink
        self.content = ''


class DetectionEngine:
    """Accumulates per-image YOLO detections, runs NMS, and scores them with
    the COCO evaluation toolkit."""

    def __init__(self, args_detection, threshold):
        self.ignore_threshold = threshold
        self.labels = args_detection.labels
        self.num_classes = len(self.labels)
        self.results = {}            # img_id -> {category_id: [[x, y, w, h, conf], ...]}
        self.file_path = ''          # path of the last written result json
        self.save_prefix = args_detection.output_dir
        self.ann_file = args_detection.ann_file
        self._coco = COCO(self.ann_file)
        self._img_ids = list(sorted(self._coco.imgs.keys()))
        self.det_boxes = []
        self.nms_thresh = args_detection.eval_nms_thresh
        self.multi_label = args_detection.multi_label
        self.multi_label_thresh = args_detection.multi_label_thresh
        # was: a COCO-derived id list was assigned and immediately overwritten;
        # the config-supplied ids are the ones actually used.
        self.coco_catids = args_detection.coco_ids

    def do_nms_for_results(self):
        """Apply DIoU-NMS per (image, class) and flatten survivors into det_boxes."""
        for img_id in self.results:
            for clsi in self.results[img_id]:
                dets = np.array(self.results[img_id][clsi])
                keep_index = self._diou_nms(dets, thresh=self.nms_thresh)

                keep_box = [{'image_id': int(img_id), 'category_id': int(clsi),
                             'bbox': list(dets[i][:4].astype(float)),
                             'score': dets[i][4].astype(float)} for i in keep_index]
                self.det_boxes.extend(keep_box)

    def _nms(self, predicts, threshold):
        """Greedy IoU NMS on rows [x, y, w, h, score]; returns kept row indices."""
        x1 = predicts[:, 0]
        y1 = predicts[:, 1]
        x2 = x1 + predicts[:, 2]
        # was: `y1 + predicts[:3]` sliced the first three ROWS instead of column 3
        y2 = y1 + predicts[:, 3]
        scores = predicts[:, 4]

        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.argsort()[::-1]

        reserved_boxes = []
        while order.size > 0:
            i = order[0]
            reserved_boxes.append(i)
            max_x1 = np.maximum(x1[i], x1[order[1:]])
            max_y1 = np.maximum(y1[i], y1[order[1:]])
            min_x2 = np.minimum(x2[i], x2[order[1:]])
            min_y2 = np.minimum(y2[i], y2[order[1:]])

            intersect_w = np.maximum(0.0, min_x2 - max_x1 + 1)
            intersect_h = np.maximum(0.0, min_y2 - max_y1 + 1)
            intersect_area = intersect_w * intersect_h
            ovr = intersect_area / (areas[i] + areas[order[1:]] - intersect_area)

            indexes = np.where(ovr <= threshold)[0]
            order = order[indexes + 1]
        return reserved_boxes

    def _diou_nms(self, dets, thresh=0.5):
        """Distance-IoU NMS on rows [x, y, w, h, score]; returns kept row indices.

        DIoU subtracts the normalized center distance from the IoU, penalizing
        far-apart boxes less than plain IoU suppression would.
        """
        x1 = dets[:, 0]
        y1 = dets[:, 1]
        x2 = x1 + dets[:, 2]
        y2 = y1 + dets[:, 3]
        scores = dets[:, 4]
        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.argsort()[::-1]
        keep = []
        while order.size > 0:
            i = order[0]
            keep.append(i)
            xx1 = np.maximum(x1[i], x1[order[1:]])
            yy1 = np.maximum(y1[i], y1[order[1:]])
            xx2 = np.minimum(x2[i], x2[order[1:]])
            yy2 = np.minimum(y2[i], y2[order[1:]])

            w = np.maximum(0.0, xx2 - xx1 + 1)
            h = np.maximum(0.0, yy2 - yy1 + 1)
            inter = w * h
            ovr = inter / (areas[i] + areas[order[1:]] - inter)
            # squared distance between box centers
            center_x1 = (x1[i] + x2[i]) / 2
            center_x2 = (x1[order[1:]] + x2[order[1:]]) / 2
            center_y1 = (y1[i] + y2[i]) / 2
            center_y2 = (y1[order[1:]] + y2[order[1:]]) / 2
            inter_diag = (center_x2 - center_x1) ** 2 + (center_y2 - center_y1) ** 2
            # squared diagonal of the smallest box enclosing both
            out_max_x = np.maximum(x2[i], x2[order[1:]])
            out_max_y = np.maximum(y2[i], y2[order[1:]])
            out_min_x = np.minimum(x1[i], x1[order[1:]])
            out_min_y = np.minimum(y1[i], y1[order[1:]])
            out_diag = (out_max_x - out_min_x) ** 2 + (out_max_y - out_min_y) ** 2
            diou = ovr - inter_diag / out_diag
            diou = np.clip(diou, -1, 1)
            inds = np.where(diou <= thresh)[0]
            order = order[inds + 1]
        return keep

    def write_result(self):
        """Dump accumulated detections to a timestamped json; returns its path."""
        import json
        t = datetime.datetime.now().strftime('_%y_%m_%d_%H_%M_%S')
        try:
            self.file_path = self.save_prefix + '/predict' + t + '.json'
            f = open(self.file_path, 'w')
            json.dump(self.det_boxes, f)
        except IOError as e:
            raise RuntimeError('Unable to open json file to dump.what():{}'.format(str(e)))
        else:
            f.close()
            return self.file_path

    def get_eval_result(self):
        """Run COCO bbox evaluation on the written result file.

        Returns the textual summary that COCOeval.summarize() prints.
        """
        coco_gt = COCO(self.ann_file)
        coco_dt = coco_gt.loadRes(self.file_path)
        coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
        coco_eval.evaluate()
        coco_eval.accumulate()
        rdct = Redirct()
        stdout = sys.stdout
        # was: stdout was saved but never swapped, so summarize() printed to the
        # console and this method always returned an empty string.
        sys.stdout = rdct
        coco_eval.summarize()
        sys.stdout = stdout
        return rdct.content

    def detect(self, outputs, batch, image_shape, image_id):
        """Convert raw head outputs into per-image, per-class
        [x, y, w, h, conf] rows stored in self.results.

        outputs: list of per-scale arrays, each indexed [batch, ...] with the
        last axis holding [x, y, w, h, obj_conf, class scores...] (normalized).
        """
        for batch_id in range(batch):
            for out_item in outputs:
                out_item_single = out_item[batch_id, :]
                ori_w, ori_h = image_shape[batch_id]
                img_id = int(image_id[batch_id])
                if img_id not in self.results:
                    # was: `defaultdict[list]` stored the generic alias object,
                    # not a defaultdict instance
                    self.results[img_id] = defaultdict(list)
                # rescale normalized coords to the original image size
                x = ori_w * out_item_single[..., 0].reshape(-1)
                y = ori_h * out_item_single[..., 1].reshape(-1)
                w = ori_w * out_item_single[..., 2].reshape(-1)
                h = ori_h * out_item_single[..., 3].reshape(-1)
                conf = out_item_single[..., 4:5]
                cls_emb = out_item_single[..., 5:]
                cls_argmax = np.expand_dims(np.argmax(cls_emb, axis=-1), axis=-1)
                x_top_left = x - w / 2.
                y_top_left = y - h / 2.
                cls_emb = cls_emb.reshape(-1, self.num_classes)
                if self.multi_label:
                    confidence = conf.reshape(-1, 1) * cls_emb
                    flag = cls_emb > self.multi_label_thresh
                    flag = flag.nonzero()
                    for i, j in zip(*flag):
                        confi = confidence[i][j]
                        if confi < self.ignore_threshold:
                            continue
                        # was: clamped the whole y_top_left array instead of element i
                        x_lefti, y_lefti = max(0, x_top_left[i]), max(0, y_top_left[i])
                        wi, hi = min(w[i], ori_w), min(h[i], ori_h)
                        coco_clsi = self.coco_catids[j]
                        self.results[img_id][coco_clsi].append([x_lefti, y_lefti, wi, hi, confi])
                else:
                    cls_argmax = cls_argmax.reshape(-1)
                    # was: `np.random.random(cls_emb.shaep) > sys.maxsize` — a
                    # typo'd ('shaep'), roundabout way to build an all-False mask
                    flag = np.zeros(cls_emb.shape, dtype=bool)
                    for i in range(flag.shape[0]):
                        c = cls_argmax[i]
                        flag[i, c] = True
                    confidence = conf.reshape(-1) * cls_emb[flag]
                    for x_lefti, y_lefti, wi, hi, confi, clsi in zip(x_top_left, y_top_left, w, h, confidence,
                                                                     cls_argmax):
                        if confi < self.ignore_threshold:
                            continue
                        x_lefti, y_lefti = max(0, x_lefti), max(0, y_lefti)
                        wi, hi = min(wi, ori_w), min(hi, ori_h)

                        coco_clsi = self.coco_catids[clsi]
                        self.results[img_id][coco_clsi].append([x_lefti, y_lefti, wi, hi, confi])

    def detect_one_image(self, outputs, batch, image_shape):
        """Collect class-index-1 boxes as [x1, y1, x2, y2, conf] rows for the
        single-image demo; returns an (N, 5) array.

        NOTE(review): the `j == 1` filter presumably selects the 'person'
        class — confirm against the label ordering in config.labels.
        """
        for batch_id in range(batch):
            bboxs = []
            for out_item in outputs:
                out_item_single = out_item[batch_id, :]
                ori_w, ori_h = image_shape

                x = ori_w * out_item_single[..., 0].reshape(-1)
                y = ori_h * out_item_single[..., 1].reshape(-1)
                w = ori_w * out_item_single[..., 2].reshape(-1)
                h = ori_h * out_item_single[..., 3].reshape(-1)
                conf = out_item_single[..., 4:5]
                cls_emb = out_item_single[..., 5:]
                x_top_left = x - w / 2.
                y_top_left = y - h / 2.

                cls_emb = cls_emb.reshape(-1, self.num_classes)
                if self.multi_label:
                    confidence = conf.reshape(-1, 1) * cls_emb
                    flag = cls_emb > self.multi_label_thresh
                    flag = flag.nonzero()
                    for i, j in zip(*flag):
                        confi = confidence[i][j]
                        if confi < self.ignore_threshold:
                            continue
                        if j == 1:
                            x_lefti, y_lefti = max(0, x_top_left[i]), max(0, y_top_left[i])
                            wi, hi = min(w[i], ori_w), min(h[i], ori_h)
                            x_right, y_right = min(x_lefti + wi, ori_w), min(y_lefti + hi, ori_h)
                            bboxs = bboxs + [x_lefti, y_lefti, x_right, y_right, confi]

            self.bboxs = np.reshape(bboxs, (-1, 5))
        return self.bboxs


def eval_preprocess():
    """Point the config at the local mini-VOC eval data and attach a logger."""
    config.data_root = './minivoc/images'
    config.ann_file = './minivoc/val.json'
    config.logger = get_logger(config.output_dir, int(os.getenv("DEVICE_ID", '0')))


def load_parameters(network, filename):
    """Load a checkpoint into ``network``, dropping optimizer state and the
    'yolo_network.' prefix saved by the training wrapper."""
    para_dict = ms.load_checkpoint(filename)
    para_dict_new = {}
    for key, values in para_dict.items():
        if key.startswith('moments.'):
            continue  # optimizer slots, not model weights
        elif key.startswith('yolo_network.'):
            para_dict_new[key[13:]] = values
        else:
            para_dict_new[key] = values
    # was: the cleaned dict was built but never loaded, so the checkpoint was
    # silently discarded and the network kept its random init.
    ms.load_param_into_net(network, para_dict_new)


def run_eval():
    """Evaluate a trained YOLOv5 checkpoint on the validation set and log COCO mAP."""
    eval_preprocess()
    start_time = time.time()
    config.logger.info('Creating Network...')
    dict_version = {'yolov5s': 0, 'yolov5m': 1, 'yolov5l': 2, 'yolov5x': 3}
    network = YOLOV5(is_training=False, version=dict_version[config.yolov5_version])
    config.pretrained = 'output/yolov5_1_28.ckpt'

    if os.path.isfile(config.pretrained):
        load_parameters(network, config.pretrained)
    else:
        raise FileNotFoundError(f'{config.pretrained} is not a filename.')
    ds = create_yolo_dataset(config.data_root, config.ann_file, is_training=False, batch_size=config.per_batch_size,
                             device_num=1, rank=0, shuffle=False, config=config)
    config.logger.info('testing shape : %s', config.test_img_shape)
    config.logger.info('total %d images to eval', ds.get_dataset_size() * config.per_batch_size)

    network.set_train(False)

    detection = DetectionEngine(config, config.test_ignore_threshold)

    input_shape = ms.Tensor(tuple(config.test_img_shape), ms.float32)
    config.logger.info('starting inference...')
    # was: the `for` line was missing its ':' (SyntaxError), and a TUPLE
    # iterator was used while the body indexes rows by key — use the dict
    # iterator so data['image'] etc. work.
    for index, data in enumerate(ds.create_dict_iterator(output_numpy=True, num_epochs=1)):
        image = data['image']
        # Focus/space-to-depth slice: fold each 2x2 pixel block into channels.
        # was: concatenated along axis 0 (the batch dim); axis=1 matches the
        # identical preprocessing used in the demo section below.
        image = np.concatenate((image[..., ::2, ::2], image[..., 1::2, ::2],
                                image[..., ::2, 1::2], image[..., 1::2, 1::2]), axis=1)
        image = ms.Tensor(image)

        image_shape_ = data['image_shape']
        image_id_ = data['img_id']
        output_big, output_me, output_small = network(image, input_shape)
        output_big = output_big.asnumpy()
        output_me = output_me.asnumpy()
        output_small = output_small.asnumpy()
        # was: image_id_ was passed twice; the third argument is the image shape
        detection.detect([output_small, output_me, output_big], config.per_batch_size, image_shape_, image_id_)

        if index % 50 == 0:
            config.logger.info('Processing...{:.2f}'.format(index / ds.get_dataset_size() * 100))
    config.logger.info('Calculating mAp...')
    detection.do_nms_for_results()
    # was: get_eval_result() was called here, before the result json existed;
    # write the detections first, then evaluate.
    result_file_path = detection.write_result()
    config.logger.info('result file path:%s', result_file_path)
    eval_result = detection.get_eval_result()

    cost_time = time.time() - start_time
    eval_log_string = '\n=============coco  eval result============\n' + eval_result
    config.logger.info(eval_log_string)
    # was: '&.2f' is not a valid %-format directive
    config.logger.info('testing cost time %.2f h', cost_time / 3600.)


run_eval()

import matplotlib.pyplot as plt
import os
import numpy as np
from src.config import config
from PIL import ImageDraw, ImageFont


def load_parameters(network, filename):
    """Load checkpoint weights into ``network``.

    Skips optimizer 'moments.' entries and strips the 'yolo_network.' prefix
    that the training wrapper adds to parameter names.
    """
    prefix = 'yolo_network.'
    cleaned = {}
    for name, param in ms.load_checkpoint(filename).items():
        if name.startswith('moments.'):
            continue
        if name.startswith(prefix):
            cleaned[name[len(prefix):]] = param
        else:
            cleaned[name] = param

    ms.load_param_into_net(network, cleaned)


def resize_image(image, size):
    """Resize a PIL image to ``size`` (w, h) using bilinear interpolation."""
    width, height = size
    return image.resize((width, height), Image.BILINEAR)


def preprocess_input(image):
    """Scale pixel values from [0, 255] down to [0, 1].

    The division is in place, so ndarray callers observe the mutation.
    """
    image /= 255.0
    return image


def cvtColor(image):
    """Return ``image`` as 3-channel RGB, converting via PIL when needed."""
    shape = np.shape(image)
    if len(shape) == 3 and shape[2] == 3:
        return image  # already H x W x 3; pass through untouched
    return image.convert('RGB')


# --- single-image inference demo: run the network on one jpg and NMS the boxes ---
detection = DetectionEngine(config, 0.1)
dict_version = {'yolov5s': 0, 'yolov5m': 1, 'yolov5l': 2, 'yolov5x': 3}
network = YOLOV5(is_training=False, version=dict_version['yolov5s'])
pretrained_file = 'output/yolov5_1_28.ckpt'
if os.path.isfile(pretrained_file):
    load_parameters(network, pretrained_file)
detected_image_path = 'images/000104.jpg'
detected_image_name = os.path.basename(detected_image_path)
img = Image.open(detected_image_path)
# np.shape(img) is (h, w, ...); swap to (width, height) for the clamping below
image_shape = np.array(np.shape(img)[0:2])
image_shape = np.array([image_shape[1], image_shape[0]])
imagedata = cvtColor(img)
image = resize_image(imagedata, (640, 640))
# HWC float in [0, 1] -> CHW -> add batch dimension
image = np.expand_dims(np.transpose(preprocess_input(
    np.array(image, dtype='float32')
), (2, 0, 1)), 0)
# Focus/space-to-depth slice: fold each 2x2 pixel block into channels (3 -> 12)
image = np.concatenate((image[..., ::2, ::2], image[..., 1::2, ::2],
                        image[..., ::2, 1::2], image[..., 1::2, 1::2]), axis=1)
image = ms.Tensor(image)
input_shape = ms.Tensor(tuple(config.test_img_shape), ms.float32)
output_big, output_me, output_small = network(image, input_shape)
output_big = output_big.asnumpy()
output_me = output_me.asnumpy()
output_small = output_small.asnumpy()
# bbox rows are [x1, y1, x2, y2, score] as produced by detect_one_image
bbox = detection.detect_one_image([output_small, output_me, output_big], 1, image_shape)

inputs = ms.Tensor(bbox, ms.float32)
nms = ms.ops.NMSWithMask(iou_threshold=0.5)
output_boxes, indices, mask = nms(inputs)
output_boxes = output_boxes.asnumpy()
mask = mask.asnumpy()
# keep only boxes that survived NMS (mask selects the kept rows)
output_boxes = output_boxes[mask]
indices_np = indices.asnumpy()  # NOTE(review): unused below — candidate for removal
top_boxes = output_boxes[:, :4]
top_conf = output_boxes[:, 4]

# --- draw the surviving boxes and labels onto the image ---
font = ImageFont.truetype(font='simhei.ttf', size=np.floor(
    3e-2 * image_shape[1] + 0.5).astype('int32'))
thickness = int(
    max((image_shape[0] + image_shape[1]) // np.mean([640, 640]), 1)
)
# was: the `for` line was missing its ':' (SyntaxError); the fallback text
# origin called np.array(left, top + 1) with two positional args (TypeError);
# and the rectangle-thickness loop reused `i`, clobbering the outer box index.
for box_idx in range(len(top_conf)):
    predicted_class = 'person'
    box = top_boxes[box_idx]
    score = top_conf[box_idx]
    left, top, right, bottom = box

    # clamp the box to the image; image_shape is (width, height)
    top = max(0, np.floor(top).astype('int32'))
    left = max(0, np.floor(left).astype('int32'))
    bottom = min(image_shape[1], np.floor(bottom).astype('int32'))
    right = min(image_shape[0], np.floor(right).astype("int32"))

    label = '{} {:.2f}'.format(predicted_class, score)
    draw = ImageDraw.Draw(imagedata)
    label_size = draw.textsize(label, font)
    label = label.encode('utf-8')
    print(label, top, left, bottom, right)

    # place the label above the box when it fits, otherwise just inside the top edge
    if top - label_size[1] >= 0:
        text_origin = np.array([left, top - label_size[1]])
    else:
        text_origin = np.array([left, top + 1])

    # draw `thickness` nested rectangles to fake a thick outline
    for offset in range(thickness):
        draw.rectangle([left + offset, top + offset, right - offset, bottom - offset], outline=(204, 0, 255))
    draw.rectangle([tuple(text_origin), tuple(
        text_origin + label_size
    )], fill=(204, 0, 255))
    draw.text(text_origin, str(label, 'utf-8'), fill=(0, 0, 0), font=font)
    del draw

# --- save the annotated image and show original vs. prediction side by side ---
dir_save_path = 'images'
result_name = 'result_' + detected_image_name
# was: replace('.jpg', 'png') dropped the dot, yielding e.g. 'result_000104png'
result_path = os.path.join(dir_save_path, result_name.replace('.jpg', '.png'))
print(result_path)
imagedata.save(result_path, quality=95, subsampling=0)

# was: `img = 0 = Image.open(...)` is a SyntaxError; the reopened original
# belongs in img0, which the first subplot displays.
img0 = Image.open(detected_image_path)
img2 = Image.open(result_path)
plt.figure(figsize=(20, 20))
plt.subplot(1, 2, 1)
plt.title('Primary')
plt.imshow(img0)
plt.subplot(1, 2, 2)
plt.title('Predict')
plt.imshow(img2)
plt.show()
