import os
import time
from image_preprocess import one_image_preprosessing

import tvm
from tvm.contrib import graph_runtime

# Paths to the model artifacts exported by the compilation step.
graph_json_path = 'inception_v4.json'
libpath = 'inception_v4.so'
param_path = 'inception_v4.params'
# Load the exported model to verify that it still works correctly.
# Use `with` so the file handles are closed deterministically instead of leaking.
with open(graph_json_path) as json_file:
    loaded_json = json_file.read()
loaded_lib = tvm.runtime.load_module(libpath)
with open(param_path, "rb") as param_file:
    loaded_params = bytearray(param_file.read())
# Execution target is GPU device 0.
ctx = tvm.gpu(0)
module = graph_runtime.create(loaded_json, loaded_lib, ctx)
module.load_params(loaded_params)

# Image reading, preprocessing, and TVM forward pass over the validation list.
root_path = '/home/hookai/dataset/cats_and_dogs'
# Each line of val_tvm.txt: "<relative/image/path> <integer label>".
with open(os.path.join(root_path, 'val_tvm.txt'), 'r') as list_file:
    img_paths = list_file.readlines()
total_time = 0.0

tp = 0  # count of correct top-1 predictions
for idx, line in enumerate(img_paths):
    # Split once instead of re-splitting the line for each field.
    rel_path, img_label = line.strip().split(' ')[:2]
    img_path = os.path.join(root_path, rel_path)
    img = one_image_preprosessing(img_path)

    # Time only the device upload + inference, not image preprocessing.
    start_time = time.time()
    module.set_input("input", tvm.nd.array(img.astype('float32'), ctx=ctx))
    module.run()
    out_deploy = module.get_output(0).asnumpy().argmax()
    end_time = time.time()
    total_time += end_time - start_time
    if out_deploy == int(img_label):
        tp += 1
    if idx % 10 == 0 and idx != 0:
        print("processing {} images".format(idx))

# total_time is already a float, so no explicit conversion is needed.
print("average time: {} s".format(total_time / len(img_paths)))
print("Acc: {}".format(float(tp) / len(img_paths)))

# TODO: recorded results — the CAFFE and auto-tvm sections below duplicate the
# GPU numbers exactly and look like copy-paste placeholders; re-measure them.
# GPU
# 331M
# average time: 0.2570869572162628 s
# Acc: 0.982

# CAFFE
# 331M
# average time: 0.2570869572162628 s
# Acc: 0.982

# auto tvm
# 331M
# average time: 0.2570869572162628 s
# Acc: 0.982