import tvm
from tvm import relay
import datetime
import numpy as np
import tvm.contrib.graph_runtime as runtime
from tvm.contrib import graph_runtime
from tvm.contrib.download import download_testdata

### PyTorch imports
import torch
import torchvision

def tvm_without_tune():
    """Compile a pretrained ResNet-50 through Relay (no auto-tuning), run it
    on OpenCL, and compare its top-1 prediction and latency against the
    native PyTorch model.
    """
    input_name = 'input0'
    target = 'opencl'
    ctx = tvm.opencl()

    model, scripted_model = create_model()
    img = load_data()
    graph, lib, params = convert_model(input_name, img, scripted_model, target)

    tvm_logits, start, end = tvm_inference(graph, lib, ctx, input_name, img, params)

    key_to_classname, class_id_to_key = create_data()

    ### Get top-1 result for TVM
    top1_tvm = np.argmax(tvm_logits.asnumpy()[0])
    tvm_class_key = class_id_to_key[top1_tvm]

    top1_torch, torch_class_key, start2, end2 = native_inference(img, model, class_id_to_key)

    print('Relay top-1 id: {}, class name: {}'.format(top1_tvm, key_to_classname[tvm_class_key]))
    # total_seconds() is used instead of `.seconds*1000 + .microseconds/1000`:
    # `.seconds` silently drops the `days` component of a timedelta.
    print('>>TVM cost %.2f ms' %
            ((end - start).total_seconds() * 1000.0))
    print('Torch top-1 id: {}, class name: {}'.format(top1_torch, key_to_classname[torch_class_key]))
    print('>>Torch cost %.2f ms' %
            ((end2 - start2).total_seconds() * 1000.0))

    ### evaluate2: TVM's built-in time_evaluator gives a statistically
    ### sounder latency estimate than a single wall-clock measurement.
    module = runtime.create(graph, lib, ctx)
    print("target = ", target, "Evaluate inference time cost...")
    ftimer = module.module.time_evaluator("run", ctx, number=1, repeat=600)
    prof_res = np.array(ftimer().results) * 1000  # convert to milliseconds
    print("Mean inference time (std dev): %.2f ms (%.2f ms)" %
                (np.mean(prof_res), np.std(prof_res)))

def create_model():
    """Load a pretrained torchvision ResNet-50 in eval mode.

    Returns:
        (model, scripted_model): the eager model (used for native PyTorch
        inference) and a traced TorchScript copy (used for Relay import).
    """
    model = getattr(torchvision.models, 'resnet50')(pretrained=True).eval()

    ### Trace with a dummy NCHW batch to obtain the TorchScript module.
    dummy_input = torch.randn([1, 3, 224, 224])
    scripted_model = torch.jit.trace(model, dummy_input).eval()

    return model, scripted_model

def load_data():
    """Download the sample cat image and preprocess it into a (1, 3, 224, 224)
    float batch with standard ImageNet normalization."""
    from PIL import Image
    from torchvision import transforms

    img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true'
    img_path = download_testdata(img_url, 'cat.png', module='data')
    image = Image.open(img_path).resize((224, 224))

    ### Standard ImageNet eval pipeline: resize, center-crop, normalize.
    pipeline = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225]),
    ])
    tensor = pipeline(image)

    ### Prepend the batch dimension expected by the model.
    return np.expand_dims(tensor, 0)

def convert_model(input_name, img, scripted_model, target):
    """Import the TorchScript model into Relay and compile it for `target`.

    Returns:
        (graph, lib, params): serialized graph JSON, compiled library, and
        the parameter dict produced by relay.build.
    """
    mod, params = relay.frontend.from_pytorch(
        scripted_model, [(input_name, img.shape)])

    ### opt_level=3 enables the full set of graph-level optimizations.
    with tvm.transform.PassContext(opt_level=3):
        graph, lib, params = relay.build(mod, target=target, params=params)

    return graph, lib, params

def tvm_inference(graph, lib, ctx, input_name, img, params):
    """Run one inference through the TVM graph runtime.

    Returns:
        (tvm_logits, start, end): output logits as a tvm.nd.NDArray plus
        wall-clock timestamps bracketing the run.
    """
    dtype = 'float32'
    graph_rt = graph_runtime.create(graph, lib, ctx)

    graph_rt.set_input(input_name, tvm.nd.array(img.astype(dtype)))
    graph_rt.set_input(**params)

    start = datetime.datetime.now()
    graph_rt.run()
    # Device kernel launches (e.g. OpenCL) are asynchronous; without this
    # sync the `end` timestamp would only measure launch overhead and the
    # real compute would be hidden inside the later get_output() call.
    ctx.sync()
    end = datetime.datetime.now()

    tvm_logits = graph_rt.get_output(0)

    return tvm_logits, start, end

def create_data():
    """Download ImageNet label metadata and build two lookup tables.

    Returns:
        key_to_classname: synset key -> human-readable class name.
        class_id_to_key: class index -> synset key.
    """
    synset_url = ''.join(['https://raw.githubusercontent.com/Cadene/', 'pretrained-models.pytorch/master/data/', 'imagenet_synsets.txt'])
    synset_path = download_testdata(synset_url, 'imagenet_synsets.txt', module='data')
    with open(synset_path) as f:
        synset_lines = [line.strip() for line in f]

    ### Each line is "<synset key> <class name with spaces>".
    key_to_classname = {}
    for line in synset_lines:
        parts = line.split(' ')
        key_to_classname[parts[0]] = ' '.join(parts[1:])

    class_url = ''.join(['https://raw.githubusercontent.com/Cadene/', 'pretrained-models.pytorch/master/data/', 'imagenet_classes.txt'])
    class_path = download_testdata(class_url, 'imagenet_classes.txt', module='data')
    with open(class_path) as f:
        class_id_to_key = [line.strip() for line in f]

    return key_to_classname, class_id_to_key

def native_inference(img, model, class_id_to_key):
    """Run the eager PyTorch model on `img` and resolve its top-1 class key.

    Returns:
        (top1_index, synset_key, start_timestamp, end_timestamp)
    """
    with torch.no_grad():
        tensor = torch.from_numpy(img)
        begin = datetime.datetime.now()
        logits = model(tensor)
        finish = datetime.datetime.now()
        ### Top-1 result for PyTorch.
        top1 = np.argmax(logits.numpy())
        key = class_id_to_key[top1]

    return top1, key, begin, finish

# Script entry point: run the full PyTorch -> TVM comparison demo.
if __name__ == '__main__':
    tvm_without_tune()