# This script is intended to run in the TVM environment.
import cv2
import onnx  # 1.6.0
import numpy as np
import torch
import tvm
import tvm.relay as relay

# from tvm.contrib.download import download_testdata

# Load the pre-trained Inception-v4 model exported to ONNX (expected next to this script).
onnx_model = onnx.load('inception-v4.onnx')


def image_preprocess(img, mean=None, stdev=None):
    """Normalize an image channel-wise: (channel - mean) / stdev.

    The channel order of ``mean``/``stdev`` must match the image's channel
    order (BGR here, since the image comes from ``cv2.imread``).

    Parameters:
        img: HxWx3 image array.
        mean: per-channel means; defaults to the module-level ``mean_value``.
        stdev: per-channel std devs; defaults to the module-level ``std``.

    Returns:
        A float64 HxWx3 array.
    """
    if mean is None:
        mean = mean_value
    if stdev is None:
        stdev = std
    # Broadcasting over the last (channel) axis does the same per-channel
    # arithmetic as the original cv2.split / cv2.merge round-trip, in one pass.
    return (np.asarray(img, dtype=np.float64) - np.asarray(mean)) / np.asarray(stdev)


def center_crop(img, size=None):
    """Return the central ``size`` x ``size`` crop of ``img`` (single crop).

    Parameters:
        img: HxWxC (or HxW) image array.
        size: edge length of the square crop; defaults to the module-level
            ``crop_size``.

    Returns:
        The cropped array, or ``None`` when the image's short edge is smaller
        than ``size`` (callers must handle this explicitly — the original
        behavior of silently returning None is preserved for compatibility).
    """
    if size is None:
        size = crop_size
    short_edge = min(img.shape[:2])
    if short_edge < size:
        return None
    # Center the crop window; floor division matches int(x / 2) for
    # non-negative offsets.
    yy = (img.shape[0] - size) // 2
    xx = (img.shape[1] - size) // 2
    return img[yy: yy + size, xx: xx + size]


# --- Preprocessing configuration --------------------------------------------
base_size = 300  # short size
crop_size = 299
mean_value = np.array([128.0, 128.0, 128.0])  # BGR
std = np.array([128.0, 128.0, 128.0])  # BGR
# img_path = 'cat.jpg'
img_path = 'dog.12417.jpg'
# Read as BGR (OpenCV default). Resize so the SHORT edge equals base_size,
# preserving aspect ratio; note cv2.resize takes (width, height).
img = cv2.imread(img_path)
img = cv2.resize(img, (int(img.shape[1] * base_size / min(img.shape[:2])),
                       int(img.shape[0] * base_size / min(img.shape[:2])))
                 )
# Normalize per channel, then take the central crop_size x crop_size crop.
img = image_preprocess(img)
img = center_crop(img)
# HWC -> CHW, then add a leading batch dimension: final shape (1, 3, 299, 299).
img = np.asarray(img).transpose((2, 0, 1))
img = np.expand_dims(img, axis=0)
# print(img)

target = 'llvm'  # compile for CPU via the LLVM backend
# NOTE(review): 'input' is assumed to match the ONNX graph's input tensor
# name — confirm against the exported model.
input_name = 'input'
shape_dict = {input_name: img.shape}
# Convert the ONNX graph into a Relay expression (sym) plus its weights
# (params). This tuple-returning form is the old TVM 0.x frontend API.
sym, params = relay.frontend.from_onnx(onnx_model, shape_dict)


# Build a graph-runtime executor on CPU at optimization level 1.
with relay.build_config(opt_level=1):
    intrp = relay.build_module.create_executor('graph', sym, tvm.cpu(0), target)

dtype = 'float32'
print(img.astype(dtype))
# Run inference: the first positional argument is the network input; the
# model weights are passed as keyword arguments via **params.
tvm_output = intrp.evaluate()(tvm.nd.array(img.astype(dtype), tvm.cpu(0)), **params).asnumpy()
print(tvm_output)
