"""
此代码建立在SimpleImpl.py基础上重新编写
    - 主要使用Onnx替代相关推理操作，以实现较好的cpu兼容性
    - 此实现比TorchScript快，后续部署实现采取此代码重写

    - https://onnxruntime.ai/docs/build/eps.html#openvino
    - https://onnxruntime.ai/docs/execution-providers/OpenVINO-ExecutionProvider.html
    1. 需要python 3.12
    2. `pip install onnx onnxruntime-openvino`, 确保安装版本是1.20.0
    3. 根据这个: https://github.com/intel/onnxruntime/releases/的介绍1.20.0需要下载Openvino 2024.5.0
        - https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.5/
        - `pip install openvino==2024.5.0`
    4. `pip install opencv-python`
    5. 推荐使用干净环境，不要安装除了上面外的包

    6. linux上述步骤不需要安装openvino，版本要求3.12
"""
import time
import onnxruntime
import onnxruntime.tools.add_openvino_win_libs as utils

# NOTE(review): placed before the cv2/numpy imports deliberately — presumably the
# OpenVINO DLL directories must be on the Windows search path before any native
# extension loads; confirm before reordering imports.
utils.add_openvino_libs_to_path()
import cv2
import numpy as np

# 'OpenVINOExecutionProvider' must appear in this list, otherwise the session
# below silently falls back to the plain CPU provider.
print(onnxruntime.get_available_providers())  # OpenVINOExecutionProvider, make sure it is present
model = onnxruntime.InferenceSession("v1_onnx.onnx", providers=['OpenVINOExecutionProvider', 'CPUExecutionProvider'])


def torch_style_softmax_numpy_impl(x, axis=-1):
    """Numerically stable softmax along *axis*, mirroring torch.softmax.

    Subtracting the per-slice maximum before exponentiating prevents overflow
    for large logits without changing the result.
    """
    shifted = x - np.max(x, axis=axis, keepdims=True)
    exp_vals = np.exp(shifted)
    return exp_vals / exp_vals.sum(axis=axis, keepdims=True)


def onnx_infer_data(x):
    """Run one forward pass through the module-level ONNX session.

    Prints the wall-clock latency of the call and returns the first output
    tensor (the model is fed through input name 'input:0').
    """
    t0 = time.perf_counter()
    outputs = model.run(None, {'input:0': x})
    elapsed = time.perf_counter() - t0
    print("onnx cost: ", elapsed)
    return outputs[0]


def get_nnunet_predict(x: np.ndarray, dims: list):
    """Flip-augmented prediction with softmax.

    Mirrors the input along the axes in *dims*, runs inference, applies a
    softmax over the channel axis (1), then mirrors the result back so it
    aligns with the un-flipped input.
    """
    mirrored = np.flip(x, dims)
    logits = onnx_infer_data(mirrored)
    probs = torch_style_softmax_numpy_impl(logits, 1)
    return np.flip(probs, dims)


def get_nnunet_predict_without_softmax(x: np.ndarray, dims: list):
    """Same flip-augmentation scheme as get_nnunet_predict, but returns the
    raw logits (no softmax) mirrored back to the original orientation."""
    mirrored = np.flip(x, dims)
    logits = onnx_infer_data(mirrored)
    return np.flip(logits, dims)


# --- Single-image inference with flip test-time augmentation -----------------
sample_image = "../resource/raw_resize/1.jpg"
start_time = time.perf_counter()
mat = cv2.imread(sample_image)  # HWC, BGR, uint8; returns None on a bad path — TODO guard
mat = mat.transpose((2, 0, 1))  # HWC -> CHW to match the model's NCHW input
mat = mat.astype(np.float32)
# Per-channel z-score normalization; the epsilon guards against a zero std.
for c in range(mat.shape[0]):
    mat[c] = (mat[c] - mat[c].mean()) / (mat[c].std() + 1e-8)
mat = np.expand_dims(mat, 0)  # add batch dim -> (1, C, H, W)

# Alternative: average softmax probabilities over the four augmentations
# (identity + three flips). More faithful to nnU-Net, but slower.
# pred = torch_style_softmax_numpy_impl(onnx_infer_data(mat), 1)
# pred += get_nnunet_predict(mat, [3])
# pred += get_nnunet_predict(mat, [2])
# pred += get_nnunet_predict(mat, [3, 2])
# pred /= 4.0

# Observed in the C++ optimization tests: summing raw logits differs slightly
# from averaging softmax outputs, but the difference is acceptable. If the gap
# is too large, use the softmax variant above; otherwise this version is more
# efficient (roughly halves the runtime).
# Therefore the Python wrapper should expose an option, e.g. --type=simple/normal.
# When packaging this, the OpenVINO and onnxruntime DLLs likely need to be
# copied manually next to the executable.
pred = onnx_infer_data(mat)
pred += get_nnunet_predict_without_softmax(mat, [3])
pred += get_nnunet_predict_without_softmax(mat, [2])
pred += get_nnunet_predict_without_softmax(mat, [3, 2])

# Collapse the channel axis to class indices and save as an image.
pred = np.argmax(pred, 1)  # (1, H, W) integer class map
pred = pred[0]
# NOTE(review): `* 255` assumes binary segmentation (classes {0, 1}); with more
# classes the written values exceed 255 — confirm the model's class count.
cv2.imwrite("out_pred_onnx_openvino.png", pred * 255)

end_time = time.perf_counter()
print("time cost: ", end_time - start_time, "s")
