# Code adapted from infer_service_engine\bangtu-yolov7\my_work\001_origin_model_py_infer
import argparse
import os
import cv2
import numpy as np
import onnx
import onnxruntime as ort
import random

# Fix RNG seeds so any random behavior is reproducible across runs.
seed=12345
random.seed(seed)
np.random.seed(seed)

# When True, every intermediate ONNX node output is exposed as a graph output (debug aid).
out_every_conv = False

os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Detection class names; list index corresponds to the model's class id.
names = ['washer', 'shower', 'stool', 'washbasin', 'bathtub', 'urinal', 'sink', 'fridge', 'gascooker', 'lampblack']
cuda = True  # set False to force CPU-only inference
#pip install onnxruntime-gpu   for CUDAExecutionProvider
#conda install cudatoolkit   for 'Failed to load library libonnxruntime_providers_cuda.so with error: libcublasLt.so'
#providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
providers = [('CUDAExecutionProvider', {'cudnn_conv_algo_search': 'DEFAULT',}), 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']

def savePreprocess(save_path, data):
    """Dump the preprocessed input tensor to *save_path* as a raw binary blob.

    The file can be read back later with ``np.fromfile`` using the same dtype.
    """
    data.tofile(save_path)
    msg = 'save_path {0}'.format(save_path)
    print(msg)

def saveResult(save_path, outputs):
    """Write the raw network outputs to *save_path* (recoverable via ``np.fromfile``)."""
    outputs.tofile(save_path)
    message = 'save_path {0}'.format(save_path)
    print(message)

def showResult(outputs, img, ratio, dwdh):
    """Draw detection boxes on a copy of *img* and display it in a window.

    Parameters
    ----------
    outputs : iterable of (batch_id, x0, y0, x1, y1, cls_id, score) rows,
        boxes expressed in letterboxed-image coordinates.
    img : original (un-letterboxed) BGR image.
    ratio : scale factor returned by ``preprocess``.
    dwdh : (dw, dh) half-padding returned by ``preprocess``.
    """
    ori_images = [img.copy()]
    for batch_id, x0, y0, x1, y1, cls_id, score in outputs:
        image = ori_images[int(batch_id)]
        box = np.array([x0, y0, x1, y1])
        box -= np.array(dwdh * 2)  # undo letterbox padding: (dw, dh, dw, dh)
        box /= ratio               # undo letterbox scaling
        box = box.round().astype(np.int32).tolist()
        cls_id = int(cls_id)
        score = round(float(score), 3)
        color = (255, 180, 50)
        cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), color, 2)
        cv2.putText(image, names[cls_id], (box[0], box[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 2)

    # Show the annotated image. The original referenced the loop variable
    # `image` here, which raised NameError when `outputs` was empty.
    cv2.imshow('image', ori_images[0])
    cv2.waitKey(0)

def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleup=True, stride=32):
    """Resize *im* to fit *new_shape* while keeping aspect ratio, padding the rest.

    Returns the padded image, the scale ratio applied, and the (dw, dh)
    half-padding added on each side (needed later to map boxes back).
    """
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)
    h, w = im.shape[:2]  # current height/width

    # One scale factor for both axes so the aspect ratio is preserved.
    ratio = min(new_shape[0] / h, new_shape[1] / w)
    if not scaleup:
        ratio = min(ratio, 1.0)  # only shrink (gives better val mAP)

    # Target size after resize, and the padding still required to reach new_shape.
    resized_wh = (int(round(w * ratio)), int(round(h * ratio)))
    pad_w = new_shape[1] - resized_wh[0]
    pad_h = new_shape[0] - resized_wh[1]
    if auto:
        # Minimum-rectangle mode: pad only up to the next stride multiple.
        pad_w, pad_h = np.mod(pad_w, stride), np.mod(pad_h, stride)

    # Split the padding evenly between the two sides.
    pad_w /= 2
    pad_h /= 2

    if (w, h) != resized_wh:
        im = cv2.resize(im, resized_wh, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(pad_h - 0.1)), int(round(pad_h + 0.1))
    left, right = int(round(pad_w - 0.1)), int(round(pad_w + 0.1))
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
    return im, ratio, (pad_w, pad_h)

def preprocess(img):
    """Turn a BGR image into the model's (1, 3, 640, 640) float32 input tensor.

    Returns the tensor plus the letterbox ratio and (dw, dh) padding needed
    to map predictions back onto the original image.
    """
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # model expects RGB ordering
    padded, ratio, dwdh = letterbox(rgb.copy(), auto=False)
    chw = padded.transpose((2, 0, 1))           # HWC -> CHW
    batched = np.ascontiguousarray(np.expand_dims(chw, 0))
    tensor = batched.astype(np.float32)
    tensor /= 255  # scale pixel values to [0, 1]; shape (1, 3, 640, 640)
    return tensor, ratio, dwdh

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-w', '--weights', type=str, default='/home/zhicheng.luo/Weights/chuwei_best_20240805.onnx', help='onnx weights path')
    parser.add_argument('-i', '--img-path', type=str, default='/home/zhicheng.luo/data/chuwei/toilet_44.jpg', help='image(jpg or png) path')
    parser.add_argument('-s', '--save-dir', type=str, default='./', help='save base dir')
    opt = parser.parse_args()

    assert os.path.exists(opt.weights), 'onnx path: %s not exists' % opt.weights
    assert os.path.exists(opt.img_path), 'image path: %s not exists' % opt.img_path
    # exist_ok avoids the check-then-create race of the original if/makedirs pair.
    os.makedirs(opt.save_dir, exist_ok=True)
    stem = os.path.splitext(os.path.basename(opt.img_path))[0]
    save_result_path = os.path.join(opt.save_dir, 'result_' + stem + '.bin')
    save_preprocess_path = os.path.join(opt.save_dir, 'preprocess_' + stem + '.bin')

    if out_every_conv:
        # Debug mode: expose every intermediate node output as a graph output
        # so the session returns all of them.
        onnx_model = onnx.load(opt.weights)
        for node in onnx_model.graph.node:
            for output in node.output:
                onnx_model.graph.output.extend([onnx.ValueInfoProto(name=output)])
        session = ort.InferenceSession(onnx_model.SerializeToString(), providers=providers)
    else:
        # onnx init
        session = ort.InferenceSession(opt.weights, providers=providers)

    outname = [i.name for i in session.get_outputs()]  # e.g. ['output']
    inname = [i.name for i in session.get_inputs()]  # e.g. ['images']

    # preprocess
    img = cv2.imread(opt.img_path)
    im, ratio, dwdh = preprocess(img)
    inp = {inname[0]: im}

    # onnx infer — run the session once (the original called session.run twice
    # with the same input, doubling inference time).
    outputs_all = session.run(outname, inp)
    outputs = outputs_all[0]

    # show and save
    savePreprocess(save_preprocess_path, im)
    saveResult(save_result_path, outputs)
    #showResult(outputs, img, ratio, dwdh)

    print('main finish')
