import torch
import onnxruntime as rt
from PIL import Image
import numpy as np
import torchvision.transforms as transforms

def preprocess_image(image_path, model_input_size=(224, 224)):
    """Load an image and convert it into a normalized model-input tensor.

    Args:
        image_path: Path to the image file on disk.
        model_input_size: (height, width) the image is resized to.

    Returns:
        A float tensor of shape (1, 3, H, W): resized, normalized, with a
        leading batch dimension added.
    """
    # BUG FIX: force 3-channel RGB. Image.open may return RGBA, palette ("P"),
    # or grayscale modes, which would break the 3-channel Normalize below.
    image = Image.open(image_path).convert("RGB")

    # Preprocessing pipeline. The mean/std values are dataset-specific —
    # presumably computed on the training set; TODO confirm against training code.
    preprocess = transforms.Compose([
        transforms.Resize(model_input_size),
        transforms.ToTensor(),
        transforms.Normalize([0.17263485, 0.15147247, 0.14267451],
                             [0.0736155, 0.06216329, 0.05930814]),
    ])

    # Apply the pipeline and add a batch dimension: (3, H, W) -> (1, 3, H, W).
    image_tensor = preprocess(image).unsqueeze(0)

    return image_tensor

# Load the ONNX model into an inference session.
sess = rt.InferenceSession("net_18.onnx")

# Path of the image to classify.
image_path = "images.jfif"  # replace with your own image path

# Preprocess the image into a (1, 3, 224, 224) float tensor.
input_data = preprocess_image(image_path)

# Feed the numpy array under the model's declared input name.
input_name = sess.get_inputs()[0].name
ort_inputs = {input_name: input_data.numpy()}

# Run inference; passing None requests every model output.
ort_outputs = sess.run(None, ort_inputs)
print(ort_outputs)

# The model may emit several tensors; the first one holds the class scores.
output_data = torch.from_numpy(ort_outputs[0])

# BUG FIX: print the tensor's shape, not the whole tensor.
print("Output shape: ", output_data.shape)

# Predicted class index = argmax over the class dimension.
pre_lab = torch.argmax(output_data, dim=1)
print(pre_lab)

# Class labels (index 0: wearing a mask, index 1: not wearing a mask).
classes = ['戴口罩', '不带口罩']
result = pre_lab.item()
print(classes[result])
