# encoding=utf-8
# https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html

import torch
import torch.nn as nn
from torchvision import models

# load model
# Build the architecture only -- every parameter is overwritten by
# state.pth right below, so downloading the ImageNet checkpoint
# (pretrained=True, now deprecated in torchvision) was wasted work.
model = models.densenet121()
model.classifier = nn.Linear(model.classifier.in_features, 5)  # 5-class head to match the checkpoint
# NOTE(review): torch.load unpickles arbitrary objects -- state.pth must be
# trusted. On torch >= 1.13, consider weights_only=True.
model.load_state_dict(torch.load("state.pth", map_location=torch.device('cpu')))
model.eval()  # inference mode: freezes dropout / batchnorm statistics for export

# convert by onnx
# Mark the batch dimension as dynamic so the exported model accepts any
# batch size, not just the dummy input's batch of 1. C/H/W stay fixed
# at 3x224x224 (DenseNet's expected input resolution).
torch.onnx.export(
    model,
    torch.randn(1, 3, 224, 224),       # dummy input: shapes are traced from it
    "export_dense121_cpu.onnx",
    verbose=True,
    opset_version=16,
    do_constant_folding=True,          # fold constant subgraphs for a smaller/faster graph
    input_names=['input'],
    output_names=['output'],
    dynamic_axes={'input': {0: 'batch'}, 'output': {0: 'batch'}},
)

# test onnx
# Run the exported graph through onnxruntime and verify it matches the
# PyTorch model on the *same* random input (the original printed the ONNX
# output alone, which verifies nothing).
import onnxruntime

# providers= is mandatory since onnxruntime 1.9; this script targets CPU.
session = onnxruntime.InferenceSession(
    "export_dense121_cpu.onnx",
    providers=["CPUExecutionProvider"],
)
dummy = torch.randn(1, 3, 224, 224)
out = session.run(None, {"input": dummy.numpy()})
with torch.no_grad():
    ref = model(dummy)
# Loose tolerance: constant folding and different fp32 kernels cause tiny drift.
assert torch.allclose(ref, torch.from_numpy(out[0]), atol=1e-4), \
    "ONNX output diverges from PyTorch output"
print(out)

