import sys, os
import time

import cv2
import torch
import tvm
import tvm.contrib.graph_executor as runtime
import numpy as np
from mlp import MLPModel

# Model input configuration: one flattened 28x28 image (batch of 1, 784 features).
dtype = "float32"
input_shape = [1, 784]
# Random input tensor reused by the timing runs below.
input_tensor = np.random.uniform(size=input_shape).astype(dtype)

'''                tvm inference                         ''' 
# CPU target; switch to tvm.target.cuda() for GPU inference.
target = tvm.target.Target("llvm", host="llvm")
# target = tvm.target.cuda()
dev = tvm.device(str(target), 0)
# Load the ahead-of-time compiled MLP module exported by the TVM build step.
lib = tvm.runtime.load_module("mlpmodel.so")
tvm_mlp = runtime.GraphModule(lib["default"](dev))
# Feed the SAME tensor as the PyTorch run so the two printed outputs are
# comparable (previously a fresh, unrelated random tensor was used here).
data_tvm = tvm.nd.array(input_tensor)
tvm_mlp.set_input("input0", data_tvm)
# Warm-up: amortize one-time initialization costs before timing.
for _ in range(100):
    tvm_mlp.run()

t0 = time.time()
for _ in range(600):
    tvm_mlp.run()
print("tvm inference cost: {} ms".format((time.time() - t0)/600.0*1000.0))
# .numpy() replaces the deprecated .asnumpy() accessor.
print(tvm_mlp.get_output(0).numpy())

'''                torch inference                         '''
model = MLPModel()
# eval() disables training-only behavior (dropout/batchnorm updates) so the
# comparison against the inference-only TVM graph is fair.
model.eval()
input_tensor = torch.tensor(input_tensor)#.cuda()
# no_grad() skips autograd-graph construction on every forward pass; without
# it the measured latency includes gradient bookkeeping (the original printed
# output showed grad_fn=<AddmmBackward0>, confirming a backward graph was
# being built during timing).
with torch.no_grad():
    # Warm-up iterations before timing.
    for _ in range(100):
        out = model(input_tensor)

    t0 = time.time()
    for _ in range(600):
        out = model(input_tensor)
print("torch inference cost: {} ms".format((time.time() - t0)/600.0*1000.0))
print(out)

# tvm inference cost: 0.04775047302246094 ms
# [[-0.07527076  0.02708639 -0.01217622 -0.0299605  -0.08917459  0.01004125
#    0.05315407 -0.08486593 -0.04682484  0.07755068]]
# torch inference cost: 252.56666739781696 ms
# tensor([[ 0.0531,  0.0476,  0.0458,  0.0336,  0.0138, -0.0404, -0.0318, -0.0058,
#          -0.0009, -0.0185]], grad_fn=<AddmmBackward0>)