import json
import torch
import sys
import numpy as np
import cv2
from pathlib import Path
from openvino.runtime import Core

# from ensemble_boxes import weighted_boxes_fusion

from models.experimental import attempt_load
from utils.torch_utils import select_device
from utils.general import check_img_size, non_max_suppression, scale_coords
from utils.augmentations import letterbox

ov_model_path = '/project/train/models/train/exp/ov_weights/best.xml'


def init(performance_hint="THROUGHPUT"):
    """Read and compile the OpenVINO IR model for inference.

    Args:
        performance_hint: value for OpenVINO's PERFORMANCE_HINT config key.
            Defaults to "THROUGHPUT" (optimizes for batch/stream processing);
            pass "LATENCY" for single-request responsiveness.

    Returns:
        An openvino CompiledModel targeting the AUTO device.
    """
    core = Core()
    model = core.read_model(model=ov_model_path)
    return core.compile_model(model, "AUTO", {"PERFORMANCE_HINT": performance_hint})

def ov_opt_init():
    """Read and compile the OpenVINO IR model, tuned for low latency.

    Identical to init() except the PERFORMANCE_HINT is "LATENCY",
    which optimizes single-request response time over throughput.

    Returns:
        An openvino CompiledModel targeting the AUTO device.
    """
    core = Core()
    ir_model = core.read_model(model=ov_model_path)
    return core.compile_model(ir_model, "AUTO", {"PERFORMANCE_HINT": "LATENCY"})

def process_image(handle=None, input_image=None, args=None, **kwargs):
    """Run YOLOv5 inference via OpenVINO on a BGR image and return detections as JSON.

    Args:
        handle: OpenVINO CompiledModel returned by init() / ov_opt_init().
        input_image: BGR image array (H, W, 3), e.g. from cv2.imread.
        args: unused; kept for framework-compatible signature.
        **kwargs: unused; kept for framework-compatible signature.

    Returns:
        JSON string of the form
        {"model_data": {"objects": [{"xmin", "ymin", "xmax", "ymax",
        "confidence", "name"}, ...]}}.
    """
    conf_thres = 0.3    # confidence threshold
    iou_thres = 0.05    # NMS IoU threshold
    max_det = 1000      # maximum detections per image
    imgsz = [640, 640]  # inference size (height, width)
    stride = 32         # model stride, used by letterbox padding
    names = {
        0: 'person',
        1: 'hat',
        2: 'head'
    }

    fake_result = {}
    fake_result["model_data"] = {"objects": []}

    # Output layer of the compiled OpenVINO model (single-output detector).
    output_layer_ir = handle.outputs[0]

    # Preprocess: letterbox-resize, HWC->CHW, BGR->RGB, scale to [0, 1].
    img = letterbox(input_image, imgsz, stride, False)[0]
    img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
    # Fix: the transpose + [::-1] above produce a negatively-strided view and
    # the division would promote to float64; OpenVINO's FP32 input requires a
    # contiguous float32 array.
    img = np.ascontiguousarray(img, dtype=np.float32) / 255.0
    img = img[None, ...]  # add batch dimension -> (1, 3, H, W)

    # Inference: calling the CompiledModel returns a dict keyed by output layer.
    pred = handle([img])[output_layer_ir]
    pred = torch.from_numpy(pred)

    pred = non_max_suppression(
        pred, conf_thres, iou_thres, None, False, max_det=max_det)

    for i, det in enumerate(pred):  # per image (batch size is 1 here)
        if not len(det):
            continue  # no detections for this image
        # Rescale boxes from the letterboxed size back to the original image.
        det[:, :4] = scale_coords(
            img.shape[2:], det[:, :4], input_image.shape).round()
        for *xyxy, conf, cls in reversed(det):
            x1, y1, x2, y2 = (int(v) for v in xyxy)
            fake_result['model_data']['objects'].append({
                "xmin": x1,
                "ymin": y1,
                "xmax": x2,
                "ymax": y2,
                "confidence": conf.tolist(),
                "name": names[int(cls)]
            })

    return json.dumps(fake_result, indent=4)

if __name__ == '__main__':
    import time
    from tqdm import tqdm

    def _benchmark(model, image, runs=10):
        """Run `runs` inference passes on copies of `image`.

        Returns:
            (last_result_json, elapsed_seconds) tuple.
        """
        result = None
        start = time.time()
        for _ in tqdm(range(runs)):
            result = process_image(model, np.copy(image))
        return result, time.time() - start

    img = cv2.imread('/home/data/831/helmet_10809.jpg')

    # Compare THROUGHPUT-hinted vs LATENCY-hinted compilation of the same model.
    for build_model in (init, ov_opt_init):
        fake_result, elapsed = _benchmark(build_model(), img)
        print(fake_result)
        print(elapsed)