import sys

# import onnx
import os
import argparse
import numpy as np
import cv2
# import onnxruntime

from acllite_model import AclLiteModel
from acllite_resource import AclLiteResource

from tool.utils import *
from tool.darknet2onnx import *


def main(image_path, batch_size):
    """Run the YOLOv4 Ascend (.om) detection demo on a single image.

    Args:
        image_path: Path to the input image. If the file does not exist,
            the built-in demo image is used instead — this preserves the
            original behavior of the ``main("fakepath", 1)`` demo call,
            where the argument was unconditionally overwritten.
        batch_size: Unused; retained for interface compatibility with the
            original ONNX-export entry point this script was derived from.

    Raises:
        FileNotFoundError: If the selected image cannot be read by OpenCV.
    """
    MODEL_PATH = "/home/ningbo/yolov4.om"
    namesfile = "/home/ningbo/pytorch-YOLOv4/data/coco.names"

    # Honor the caller's path when it points at a real file; otherwise
    # fall back to the bundled demo image (the original code always
    # overwrote the argument with this path).
    if not os.path.isfile(image_path):
        image_path = "/home/ningbo/pytorch-YOLOv4/data/dog.jpg"

    # Initialize the Ascend ACL runtime and load the offline model.
    acl_resource = AclLiteResource()
    acl_resource.init()
    model = AclLiteModel(MODEL_PATH)

    image_src = cv2.imread(image_path)
    # cv2.imread returns None (no exception) on failure — fail loudly
    # instead of letting detect() crash on a None image.
    if image_src is None:
        raise FileNotFoundError("could not read image: %s" % image_path)
    detect(model, image_src, namesfile)



def detect(model, image_src, namesfile, input_size=(608, 608),
           conf_thresh=0.4, nms_thresh=0.6,
           savename='predictions_onnx.jpg'):
    """Preprocess an image, run the model, and save annotated detections.

    Args:
        model: Loaded AclLiteModel; ``execute`` is called with one input.
        image_src: BGR image as returned by ``cv2.imread``.
        namesfile: Path to the class-names file (one name per line).
        input_size: (height, width) the network expects; default 608x608
            matches the original hard-coded YOLOv4 input resolution.
        conf_thresh: Confidence threshold passed to ``post_processing``.
        nms_thresh: NMS IoU threshold passed to ``post_processing``.
        savename: Output filename for the annotated image.
    """
    in_h, in_w = input_size

    # Preprocess: resize, BGR->RGB, HWC->CHW, add batch dim, scale to [0, 1].
    resized = cv2.resize(image_src, (in_w, in_h),
                         interpolation=cv2.INTER_LINEAR)
    img_in = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
    img_in = np.transpose(img_in, (2, 0, 1)).astype(np.float32)
    img_in = np.expand_dims(img_in, axis=0)
    img_in /= 255.0
    print("Shape of the network input: ", img_in.shape)

    # Inference on the Ascend device; execute takes a list of inputs.
    outputs = model.execute([img_in, ])

    # post_processing (from tool.utils) applies confidence filtering + NMS;
    # it only uses img_in for its shape, per the original call pattern.
    boxes = post_processing(img_in, conf_thresh, nms_thresh, outputs)

    class_names = load_class_names(namesfile)
    # boxes[0]: detections for the single image in the batch.
    plot_boxes_cv2(image_src, boxes[0], savename=savename,
                   class_names=class_names)



if __name__ == '__main__':
    # Script entry point: run the detection demo with placeholder args.
    print("invoking acl running demo ...")
    demo_image = "fakepath"
    demo_batch = 1
    main(demo_image, demo_batch)
