from tvm.contrib import graph_executor
from torchvision import transforms
from PIL import Image
import tvm
from tvm import relay

import numpy as np

from tvm.contrib.download import download_testdata

import torch
import torch.nn as nn
from torchvision.models import vgg16
from utils import *

# --- Training configuration --------------------------------------------------
batch_size = 256
dev = 'cuda' if torch.cuda.is_available() else 'cpu'
device = torch.device(dev)
lr, num_epochs = 0.001, 20
# CIFAR-10 images are 3x32x32 (RGB). The previous value [1, 1, 32, 32] was a
# single-channel leftover from an MNIST/LeNet setup and would not match
# VGG16's 3-channel first conv layer when tracing/exporting the model.
input_shape = [1, 3, 32, 32]

# Load an ImageNet-pretrained VGG16 and swap the last classifier layer so it
# outputs 10 logits, one per CIFAR-10 class.
VGG16 = vgg16(pretrained=True)
VGG16.classifier[6] = nn.Linear(in_features=4096, out_features=10, bias=True)

# Move the model to the target device BEFORE building the optimizer:
# `.to('cuda')` re-materializes parameters as new tensors, so an optimizer
# constructed earlier would keep references to the stale CPU copies and
# silently update the wrong tensors. (If `train` moves the model again this
# is a harmless no-op.)
VGG16 = VGG16.to(device)

optimizer = torch.optim.Adam(VGG16.parameters(), lr=lr)
# `train`, `cifar_train_loader`, `cifar_test_loader` come from `utils`
# (star-imported above); presumably `train` runs the epoch loop and saves
# checkpoints under the run name "VGG16_Cifar10" -- confirm against utils.py.
train(VGG16, cifar_train_loader, cifar_test_loader,
      optimizer, device, num_epochs, "VGG16_Cifar10")


# model = VGG16.to(torch.device("cpu"))
# input_data = torch.randn(input_shape)
# scripted_model = torch.jit.trace(model, input_data).eval()
# mod, params = relay.frontend.from_pytorch(
#           scripted_model, input_infos=[("input0", input_shape)])
# target = tvm.target.Target("llvm", host="llvm")

# with tvm.transform.PassContext(opt_level=3):
#     graph, lib, _ = relay.build(mod, target, params=params)

# prefix = "vgg16_cifar10"  # NOTE: was "lenet_mnist", stale name from an earlier MNIST experiment
# lib.export_library(f"./{prefix}.so")

# with open(f"./{prefix}.json", "w") as fo:
#     fo.write(graph)
# with open(f"./{prefix}.params", "wb") as fo:
#     fo.write(tvm.runtime.save_param_dict(params))  # NOTE: bare `runtime` was never imported; qualify via tvm
