# encoding: utf-8

import numpy as np

import onnxruntime
# --- ONNX Runtime inference demo: load model2.onnx and run one batch. ---
providers = ['CPUExecutionProvider']
# To enable GPU inference, switch to:
# providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
session = onnxruntime.InferenceSession("model2.onnx", providers=providers)


# One batch of two identical rows, 10 features each; ONNX models typically
# require float32, hence the explicit dtype.
inputs = np.array([[10.55, 10.72, 10.46, 10.62, 10.59, 10.82, 10.7, 10.57, 10.83, 11.6],
                   [10.55, 10.72, 10.46, 10.62, 10.59, 10.82, 10.7, 10.57, 10.83, 11.6]], dtype=np.float32)
# Build the feed dict once, keyed by the model's actual input name.
# (The original built this dict with a hard-coded "input" key and then
# discarded it, rebuilding an equivalent dict inline in the run() call.)
ort_input = {session.get_inputs()[0].name: inputs}

# Request the tensor named "output" and print the resulting list of arrays.
ort_output = session.run(["output"], ort_input)
print(ort_output)
# Sample output: [array([[-2.4968026]], dtype=float32)]
