import os
import numpy as np
import tvm
from tvm import relax

'''
1. create IR_Module from torch
'''
import torch
from torch import nn
from torch.export import export
from tvm.relax.frontend.torch import from_exported_program

# Optional debugging toggles (left disabled on purpose):
# import torch._dynamo
# torch._dynamo.config.suppress_errors = True
# torch._dynamo.config.disable = True
# os.environ['TORCH_LOGS'] = "+export"

class TorchModel(nn.Module):
    """Minimal MLP classifier (784 -> 256 -> ReLU -> 10) used as the export example."""

    def __init__(self):
        super().__init__()
        # Two affine layers with a ReLU between them (MNIST-sized input/output).
        self.fc1 = nn.Linear(784, 256)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(256, 10)

    def forward(self, x):
        """Map a (N, 784) batch to (N, 10) logits."""
        return self.fc2(self.relu1(self.fc1(x)))

example_args = (torch.randn(1, 784, dtype=torch.float32),)

'''
2. export torch model to IR_Module
'''
t_model = TorchModel()
# NOTE(review): an earlier run reportedly hit
# torch._dynamo.exc.Unsupported: generator — keep an eye on this with older torch.
exported_program = export(t_model, example_args)
# keep_params_as_input=True: weights become explicit function inputs so they can
# be detached below and passed at call time.
# Fixed typo: the kwarg is "unwrap_unit_return_tuple" (was "unwarp_...").
mod_from_torch = from_exported_program(
    exported_program,
    keep_params_as_input=True,
    unwrap_unit_return_tuple=True,
)

# Fixed typos: "relax.frontedn.dateach_params" -> "relax.frontend.detach_params"
# (the original raised AttributeError at runtime).
mod_from_torch, params_from_torch = relax.frontend.detach_params(mod_from_torch)
mod_from_torch.show()

'''
3. transform ir model
'''
# Lower high-level Relax ops to TIR so the module can be compiled for a target.
legalize = relax.transform.LegalizeOps()
mod = legalize(mod_from_torch)
mod.show()
print(mod.get_global_vars())

'''
4. deploy on cpu
'''
# Renamed "exec" -> "cpu_exec": "exec" shadows the Python builtin.
cpu_exec = tvm.compile(mod, target='llvm')
dev = tvm.cpu()
vm = relax.VirtualMachine(cpu_exec, dev)

# One random 784-dim sample; reused below so the GPU run sees identical input.
raw_data = np.random.rand(1, 784).astype("float32")
data = tvm.nd.array(raw_data, dev)
# Params were detached above, so pass them explicitly after the data input.
cpu_out = vm['main'](data, *params_from_torch['main']).numpy()
print(cpu_out)

'''
5. deploy on gpu
'''
from tvm import dlight as dl

# Attach default GPU schedules (matmul + generic fallback) under the CUDA target.
with tvm.target.Target('cuda'):
    gpu_mod = dl.ApplyDefaultSchedule(dl.gpu.Matmul(), dl.gpu.Fallback())(mod)

# Renamed "exec" -> "gpu_exec": "exec" shadows the Python builtin.
# (Section renumbered 4 -> 5: the original duplicated the CPU section's number.)
gpu_exec = tvm.compile(gpu_mod, target='cuda')
dev = tvm.device('cuda', 0)
vm = relax.VirtualMachine(gpu_exec, dev)

data = tvm.nd.array(raw_data, dev)
gpu_out = vm['main'](data, *params_from_torch['main']).numpy()
print(gpu_out)

# CPU and GPU results on the same input should agree to float32 tolerance.
assert np.allclose(cpu_out, gpu_out, atol=1e-3)
