"""
此代码建立在SimpleImpl.py基础上重新编写
    - 主要使用Onnx替代相关推理操作，以实现较好的cpu兼容性
    - 此实现比TorchScript快，后续部署实现采取此代码重写
"""
import time
import onnxruntime
import cv2
import numpy as np

# Create the ONNX Runtime session once at module load; the session picks the
# best available execution provider (CPU by default on this build).
model = onnxruntime.InferenceSession("v1_onnx.onnx")


# Show which execution providers this onnxruntime build can actually use.
print(onnxruntime.get_available_providers())

def torch_style_softmax_numpy_impl(x, axis=-1):
    """Numerically stable softmax along `axis`, mirroring torch.softmax.

    The running maximum is subtracted before exponentiation so large
    logits cannot overflow to inf.
    """
    shifted = x - np.max(x, axis=axis, keepdims=True)
    exp_vals = np.exp(shifted)
    return exp_vals / exp_vals.sum(axis=axis, keepdims=True)


def onnx_infer_data(x):
    """Feed `x` to the global ONNX session as 'input:0' and return the
    first output tensor, printing the wall-clock time of the run."""
    t0 = time.perf_counter()
    outputs = model.run(None, {'input:0': x})
    t1 = time.perf_counter()
    print("onnx cost: ", t1 - t0)
    return outputs[0]


def get_nnunet_predict(x: np.ndarray, dims: list):
    """One flip-TTA pass: mirror `x` along axes `dims`, run inference,
    softmax over the class axis (1), then mirror the probabilities back
    so they align with the un-flipped input."""
    mirrored = np.flip(x, dims)
    probs = torch_style_softmax_numpy_impl(onnx_infer_data(mirrored), 1)
    return np.flip(probs, dims)


sample_image = "../resource/raw_resize/1.jpg"
start_time = time.perf_counter()

mat = cv2.imread(sample_image)
if mat is None:
    # cv2.imread signals failure by returning None (no exception); fail fast
    # instead of crashing later with a confusing AttributeError.
    raise FileNotFoundError(f"cannot read image: {sample_image}")
mat = mat.transpose((2, 0, 1))  # HWC -> CHW
mat = mat.astype(np.float32)
# Per-channel z-score normalization; the epsilon guards against a zero std
# on constant channels.
for c in range(mat.shape[0]):
    mat[c] = (mat[c] - mat[c].mean()) / (mat[c].std() + 1e-8)
mat = np.expand_dims(mat, 0)  # add batch dim -> (1, C, H, W)

# Average softmax probabilities over identity plus three mirrored variants
# (axis 3 = width, axis 2 = height, both) — nnU-Net-style flip TTA.
pred = torch_style_softmax_numpy_impl(onnx_infer_data(mat), 1)
pred += get_nnunet_predict(mat, [3])
pred += get_nnunet_predict(mat, [2])
pred += get_nnunet_predict(mat, [3, 2])
pred /= 4.0

# save: hard labels via argmax over the class axis, drop the batch dim.
pred = np.argmax(pred, 1)
pred = pred[0]
# np.argmax returns int64, which cv2.imwrite rejects (it only accepts
# 8-/16-bit images); cast to uint8 after scaling to 0/255.
cv2.imwrite("out_pred_onnx.png", (pred * 255).astype(np.uint8))

end_time = time.perf_counter()
print("time cost: ", end_time - start_time, "s")
