# Standalone YOLOv5-Lite ONNX inference library stitched together from a large amount of code.
# This code can be used either as a library or as a test tool.

import onnxruntime as ort
import cv2 as cv
import numpy as np
import random
import argparse
import json

def box_area(x1, y1, x2, y2):
    """Return the area of an axis-aligned box given its corner coordinates."""
    width = x2 - x1
    height = y2 - y1
    return width * height

def make_grid(nx, ny):
    """Build a flat (nx*ny, 2) float32 array of per-cell offset coordinate pairs.

    NOTE(review): the meshgrid axes are fed as (ny, nx), i.e. swapped relative
    to the parameter names. The caller (cal_outputs) applies a matching swap
    when computing h/w, so the two cancel out for non-square inputs — confirm
    before changing either side.
    """
    cols, rows = np.meshgrid(np.arange(ny), np.arange(nx))
    stacked = np.stack((cols, rows), axis=2)
    return stacked.reshape((-1, 2)).astype(np.float32)

def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    """Draw a single bounding box (and optional label) onto an image, in place.

    Adapted from the YOLOv5 project.
    param:
        x:      a box likes [x1,y1,x2,y2]
        img:    a opencv image object (modified in place)
        color:  color to draw rectangle, such as (0,255,0); random if omitted
        label:  str text drawn above the box, if given
        line_thickness: int rectangle/line thickness
    return:
        no return
    """
    # Derive a thickness from the image size when none is supplied.
    thickness = (
        line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1
    )
    color = color or [random.randint(0, 255) for _ in range(3)]
    top_left = (int(x[0]), int(x[1]))
    bottom_right = (int(x[2]), int(x[3]))
    cv.rectangle(img, top_left, bottom_right, color, thickness=thickness, lineType=cv.LINE_AA)
    if label:
        font_thickness = max(thickness - 1, 1)
        text_w, text_h = cv.getTextSize(label, 0, fontScale=thickness / 3, thickness=font_thickness)[0]
        # Filled background rectangle sized to the label text.
        label_corner = (top_left[0] + text_w, top_left[1] - text_h - 3)
        cv.rectangle(img, top_left, label_corner, color, -1, cv.LINE_AA)
        cv.putText(
            img,
            label,
            (top_left[0], top_left[1] - 2),
            0,
            thickness / 3,
            [225, 255, 255],
            thickness=font_thickness,
            lineType=cv.LINE_AA,
        )

def plot_boxes_from_result(result, img, color=None):
    """Return a copy of img with every detection in result drawn onto it."""
    canvas = img.copy()
    for detection in result:
        corners = detection['box']
        box = np.int32(np.array([corners['x1'], corners['y1'], corners['x2'], corners['y2']]))
        plot_one_box(box, canvas, label=detection['class_name'], color=color)
    return canvas

def cal_outputs(outs, nl, na, model_w, model_h, anchor_grid, stride):
    """Decode raw YOLO head outputs, in place, into model-input pixel space.

    param:
        outs:        stacked raw predictions, rows ordered layer by layer
        nl:          number of output layers
        na:          anchors per layer
        model_w/model_h: model input size
        anchor_grid: per-layer anchor sizes, shape (nl, na, 2)
        stride:      per-layer stride
    return:
        the same array with columns 0:2 decoded to centre coordinates
        and columns 2:4 decoded to box sizes
    """
    offset = 0
    grids = [np.zeros(1)] * nl
    for layer in range(nl):
        # NOTE(review): h is derived from model_w and w from model_h; this
        # swap mirrors the one inside make_grid and the two cancel out —
        # confirm before changing either side.
        h = int(model_w / stride[layer])
        w = int(model_h / stride[layer])
        count = int(na * h * w)
        # Placeholder grids have shape (1,), so shape[2:4] is () and the
        # real grid is built on first use.
        if grids[layer].shape[2:4] != (h, w):
            grids[layer] = make_grid(w, h)
        span = slice(offset, offset + count)
        # Standard YOLOv5 decoding: xy = (2*sigmoid - 0.5 + cell) * stride,
        # wh = (2*sigmoid)^2 * anchor.
        outs[span, 0:2] = (outs[span, 0:2] * 2. - 0.5 + np.tile(grids[layer], (na, 1))) * int(stride[layer])
        outs[span, 2:4] = (outs[span, 2:4] * 2) ** 2 * np.repeat(anchor_grid[layer], h * w, axis=0)
        offset += count
    return outs

def post_process_opencv(outputs,model_h,model_w,img_h,img_w,thred_nms,thred_cond):
    """Rescale decoded predictions to the original image and apply NMS.

    param:
        outputs: (N, 5+num_classes) rows of [cx, cy, w, h, obj_conf, cls...]
                 in model-input pixel coordinates
        model_h, model_w: model input size
        img_h, img_w: original image size
        thred_nms:  NMS IoU threshold
        thred_cond: confidence threshold
    return:
        (boxes, confidences, class_ids) for kept detections,
        or ([], [], []) when nothing survives NMS
    """
    conf = outputs[:,4].tolist()
    # Rescale centre/size from model-input space to original-image space.
    c_x = outputs[:,0]/model_w*img_w
    c_y = outputs[:,1]/model_h*img_h
    w  = outputs[:,2]/model_w*img_w
    h  = outputs[:,3]/model_h*img_h
    p_cls = outputs[:,5:]
    if len(p_cls.shape)==1:
        p_cls = np.expand_dims(p_cls,1)
    cls_id = np.argmax(p_cls,axis=1)
    # Convert centre/size to corner coordinates.
    p_x1 = np.expand_dims(c_x-w/2,-1)
    p_y1 = np.expand_dims(c_y-h/2,-1)
    p_x2 = np.expand_dims(c_x+w/2,-1)
    p_y2 = np.expand_dims(c_y+h/2,-1)
    areas = np.concatenate((p_x1,p_y1,p_x2,p_y2),axis=-1)
    areas = areas.tolist()
    # NOTE(review): cv.dnn.NMSBoxes documents boxes as (x, y, w, h) but is fed
    # (x1, y1, x2, y2) here — confirm this matches the intended IoU behaviour.
    # NOTE(review): on older OpenCV builds NMSBoxes returns (k, 1)-shaped ids,
    # making the indexed boxes (k, 1, 4); verify against the installed version.
    ids = cv.dnn.NMSBoxes(areas,conf,thred_cond,thred_nms)
    if len(ids)>0:
        return  np.array(areas)[ids],np.array(conf)[ids],cls_id[ids]
    else:
        return [],[],[]

def infer_img(img0, net, model_h, model_w, nl, na, stride, anchor_grid, thred_nms=0.4, thred_cond=0.5):
    """Run one BGR image through the ONNX session.

    return:
        (boxes, confidences, class_ids) in original-image coordinates
    """
    # Image preprocessing: resize to model input, scale pixels to [0, 1],
    # then HWC -> NCHW.
    resized = cv.resize(img0, [model_w, model_h], interpolation=cv.INTER_AREA)
    resized = resized.astype(np.float32) / 255.0
    blob = np.expand_dims(np.transpose(resized, (2, 0, 1)), axis=0)
    # Model inference
    input_name = net.get_inputs()[0].name
    outs = net.run(None, {input_name: blob})[0].squeeze(axis=0)
    # Decode raw head outputs into pixel-space boxes.
    outs = cal_outputs(outs, nl, na, model_w, model_h, anchor_grid, stride)
    # Rescale to the original image and run NMS.
    img_h, img_w, _ = np.shape(img0)
    return post_process_opencv(outs, model_h, model_w, img_h, img_w, thred_nms, thred_cond)

class YOLOV5Lite_ONNX():
    """YOLOv5-Lite detector wrapping an ONNX Runtime session.

    param:
        model_path: path to the .onnx model file
        classes:    mapping from class index to class name
        input_size: model input size as (height, width)
    """

    def __init__(self, model_path, classes, input_size=(320, 320)):
        # FIX: default was a mutable list; a tuple avoids the shared mutable
        # default-argument pitfall and is backward compatible for callers.
        self.modelH, self.modelW = input_size
        # Detection-head geometry: 3 output layers, 3 anchors per layer.
        self.nl = 3
        self.na = 3
        self.stride = [8., 16., 32.]
        # Standard YOLOv5 COCO anchors, one row per output layer.
        self.anchors = [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]]
        self.anchor_grid = np.asarray(self.anchors, dtype=np.float32).reshape(self.nl, -1, 2)
        self.sessionOpt = ort.SessionOptions()
        self.net = ort.InferenceSession(model_path, self.sessionOpt)
        self.classes = classes

    def predict(self, image, thred_nms=0.4, thred_cond=0.7, min_area=0.0):
        """Detect objects in a BGR image.

        param:
            image:      OpenCV BGR image
            thred_nms:  NMS IoU threshold
            thred_cond: confidence threshold
            min_area:   drop boxes whose pixel area is not above this value
        return:
            list of dicts: {'class_name', 'confidence_level',
                            'box': {'x1','y1','x2','y2'}}
        """
        boxes, scores, ids = infer_img(image, self.net, self.modelH, self.modelW,
                                       self.nl, self.na, self.stride, self.anchor_grid,
                                       thred_nms, thred_cond)
        ret = []
        # FIX: loop variable renamed from `id` (shadowed the builtin); filter
        # before building the dict so rejected boxes cost nothing.
        for box, confidence_level, class_id in zip(boxes, scores, ids):
            if box_area(box[0], box[1], box[2], box[3]) <= min_area:
                continue
            ret.append({
                'class_name': self.classes[class_id],
                'confidence_level': confidence_level,
                'box': {'x1': box[0], 'y1': box[1], 'x2': box[2], 'y2': box[3]},
            })
        return ret
    
def generate_random_color():
    """Return a random 3-channel colour tuple, each channel in [0, 255]."""
    return tuple(random.randint(0, 255) for _ in range(3))

def test_with_image(model_path, image_path, input_size, classes_file, thresh_nms, thresh_cond):
    """Run a single-image inference test and display the annotated result.

    Press 'q' in the preview window to quit.
    """
    img = cv.imread(image_path)
    if img is None:
        # FIX: cv.imread returns None instead of raising on a bad path;
        # without this guard the failure surfaced later as an obscure error.
        print('Failed to read image %s' % image_path)
        return
    # FIX: removed the redundant f.close() — `with` already closes the file.
    with open(classes_file, 'r') as f:
        classes = json.load(f)
    yolo = YOLOV5Lite_ONNX(model_path, classes, (input_size, input_size))
    ret = yolo.predict(img, thresh_nms, thresh_cond)
    plot_img = plot_boxes_from_result(ret, img)
    for obj in ret:
        print('\nClass name: %s\nConfidence Level: %0.4f\nBox: x1=%0.4f,y1=%0.4f,x2=%0.4f,y2=%0.4f'%(
            obj['class_name'],
            obj['confidence_level'],
            obj['box']['x1'],obj['box']['y1'],obj['box']['x2'],obj['box']['y2']))
    print('')
    while True:
        cv.imshow('Inference result', plot_img)
        if cv.waitKey(1) == ord('q'):
            break
    cv.destroyAllWindows()

def test_with_camera(model_path, use_camera, input_size, classes_file, thresh_nms, thresh_cond):
    """Run live inference on a V4L2 camera and display annotated frames.

    Press 'q' in the preview window to quit.
    """
    # FIX: removed the redundant f.close() — `with` already closes the file.
    with open(classes_file, 'r') as f:
        classes = json.load(f)
    # Give every label its own fixed colour so boxes are visually stable.
    color_mapping = {c: generate_random_color() for c in classes}
    cap = cv.VideoCapture(use_camera, cv.CAP_V4L2)
    if not cap.isOpened():
        print('Failed to open camera %s' % use_camera)
        return
    yolo = YOLOV5Lite_ONNX(model_path, classes, (input_size, input_size))
    try:
        while True:
            valid, image = cap.read()
            if not valid:
                print('Failed to get a image from camera.')
                break
            res = yolo.predict(image, thresh_nms, thresh_cond)
            plot_img = image.copy()
            # Draw every detection on a copy of the frame.
            for obj in res:
                label = obj['class_name']
                box = np.int32(np.array([obj['box']['x1'], obj['box']['y1'], obj['box']['x2'], obj['box']['y2']]))
                plot_one_box(box, plot_img, color_mapping[label], label)
            cv.imshow('Model inference with camera', plot_img)
            if cv.waitKey(1) == ord('q'):
                break
    finally:
        # FIX: the capture device was never released; release it even when
        # inference raises, then tear down the windows.
        cap.release()
        cv.destroyAllWindows()

if __name__ == '__main__':
    # Command-line entry point: test a model against an image or a camera.
    parser = argparse.ArgumentParser(description='YOLOv5-Lite ONNX inference script.')
    parser.add_argument('--use_camera', type=str, help='Use V4L2 camera device to test this model.')
    parser.add_argument('--model', type=str, help='ONNX model file.', required=True)
    parser.add_argument('--input_size', type=int, help='Model input size.', required=True)
    parser.add_argument('--image', type=str, help='Input image.')
    parser.add_argument('--classes', type=str, help='Classes JSON file.', required=True)
    parser.add_argument('--thresh_nms', type=float, help='NMS threshold.', default=0.4)
    # FIX: thresh_cond was parsed with type=str, so a user-supplied value
    # reached the float confidence comparisons as a string.
    parser.add_argument('--thresh_cond', type=float, help='Confidence level threshold.', default=0.7)
    args = parser.parse_args()
    if args.use_camera:
        test_with_camera(args.model, args.use_camera, args.input_size, args.classes, args.thresh_nms, args.thresh_cond)
    elif args.image:
        test_with_image(args.model, args.image, args.input_size, args.classes, args.thresh_nms, args.thresh_cond)
    else:
        print('Please give a input image or use camera to test a model.')
else:
    print('YOLOv5-Lite ONNX inference library loaded.')