import tvm
from tvm import relay
from tvm.contrib import graph_executor
from tvm.contrib.download import download_testdata

import numpy as np
import torch

import os
from utils import *

# Prepare a pretrained VGG16 (CIFAR-10) model.
# map_location forces the checkpoint onto the CPU at load time, so this also
# works when the .pth file was saved from a CUDA device on another machine.
MODEL_PATH = "/mnt/e/godev/inftychain/rolluppy/VGG16_Cifar10.pth"
VGG16 = torch.load(MODEL_PATH, map_location=torch.device("cpu"))
VGG16 = VGG16.to(torch.device("cpu"))

# Grab a TorchScript version of the model via tracing; TVM's PyTorch frontend
# consumes TorchScript, not eager nn.Modules.
input_shape = [1, 3, 32, 32]  # NCHW batch of one 32x32 RGB CIFAR image
input_data = torch.randn(input_shape)
scripted_model = torch.jit.trace(VGG16, input_data).eval()
input_name = "input0"  # name bound to the graph input in the Relay module
dtype = "float32"


class TvmDeployment:
    """Compile the traced PyTorch model with TVM and run CPU inference on it."""

    def __init__(self) -> None:
        # Build the TVM runtime module once, up front; inference calls reuse it.
        self.tvm_deploy()

    def tvm_deploy(self) -> None:
        """Import the traced model into Relay, compile it for CPU, and create
        the graph-executor module used by :meth:`fwd`.

        Side effects: sets ``self.lib`` (compiled library) and ``self.m``
        (graph executor bound to CPU device 0).
        """
        # Import the TorchScript graph into Relay IR.
        shape_list = [(input_name, input_shape)]
        mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)

        # Compile for the local CPU at the highest optimization level.
        target = tvm.target.Target("llvm", host="llvm")
        dev = tvm.cpu(0)
        with tvm.transform.PassContext(opt_level=3):
            self.lib = relay.build(mod, target=target, params=params)

        # Wrap the compiled library in a graph executor on the CPU device.
        self.m = graph_executor.GraphModule(self.lib["default"](dev))

    def fwd(self, img):
        """Run a single image through the compiled model.

        Parameters
        ----------
        img : np.ndarray
            One CHW image without a batch axis; the batch dimension is
            added here to match the traced input shape (1, 3, 32, 32).

        Returns
        -------
        np.intp
            Index of the top-1 predicted class.
        """
        batch = np.expand_dims(img, 0)  # add the leading batch dimension
        self.m.set_input(input_name, tvm.nd.array(batch.astype(dtype)))
        self.m.run()
        tvm_output = self.m.get_output(0)

        # Top-1 class for the single image in the batch.
        return np.argmax(tvm_output.numpy()[0])


if __name__ == "__main__":

    tvm_deployment = TvmDeployment()

    # Evaluate top-1 accuracy over at most MAX_SAMPLES test images.
    MAX_SAMPLES = 1000
    count = 0
    acc = 0

    for d in get_dataset(cifar_test_loader, [3]):
        # d[0] is the image, d[1] its ground-truth label.
        top1_tvm = tvm_deployment.fwd(d[0])
        if d[1] == top1_tvm:
            acc += 1
        # Count the sample BEFORE the break check: the original incremented
        # after it, so 1001 samples were scored while dividing by 1000.
        count += 1
        if count >= MAX_SAMPLES:
            break

    # Guard against an empty dataset to avoid ZeroDivisionError.
    print(acc / count if count else 0.0)
