"""
此代码建立在SimpleImpl.py基础上重新编写
    - 主要使用Onnx替代相关推理操作，以实现较好的cpu兼容性
    - 此实现比TorchScript快，后续部署实现采取此代码重写

    - https://onnxruntime.ai/docs/build/eps.html#openvino
    - https://onnxruntime.ai/docs/execution-providers/OpenVINO-ExecutionProvider.html
    1. 需要python 3.12
    2. `pip install onnx onnxruntime-openvino`, 确保安装版本是1.20.0
    3. 根据这个: https://github.com/intel/onnxruntime/releases/的介绍1.20.0需要下载Openvino 2024.5.0
        - https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.5/
        - `pip install openvino==2024.5.0`
    4. `pip install opencv-python`
    5. 推荐使用干净环境，不要安装除了上面外的包

    6. linux上述步骤不需要安装openvino，版本要求3.12
"""
import os
import onnxruntime
import sys

if sys.platform == "win32":
    # On Windows, sys.platform is always "win32" (even for 64-bit Python),
    # so the previous extra `or sys.platform == "win64"` comparison was dead code.
    file_now = os.path.dirname(os.path.abspath(__file__))
    openvino_libs = os.path.join(file_now, "openvino_libs")
    # Make the bundled OpenVINO runtime DLLs discoverable by onnxruntime:
    # both via PATH/OPENVINO_LIB_PATHS (read by OpenVINO itself) and via
    # os.add_dll_directory (required for DLL resolution on Python 3.8+).
    os.environ["PATH"] = os.path.abspath(openvino_libs) + ";" + os.environ["PATH"]
    os.environ["OPENVINO_LIB_PATHS"] = os.path.abspath(openvino_libs) + ";"
    os.add_dll_directory(os.path.abspath(openvino_libs))

import argparse
import numpy as np
import time
import cv2


class UnetOnnx:
    """UNet segmentation inference via ONNX Runtime (OpenVINO EP with CPU fallback)."""

    def __init__(self, model_name):
        """Load the ONNX model located next to this script.

        Args:
            model_name: file name of the .onnx model, resolved relative to
                this script's directory (not the current working directory).
        """
        file_now = os.path.dirname(os.path.abspath(__file__))
        model_path = os.path.join(file_now, model_name)
        # Prefer OpenVINO; onnxruntime falls back to the plain CPU provider
        # when OpenVINO is unavailable.
        self.model = onnxruntime.InferenceSession(
            model_path,
            providers=['OpenVINOExecutionProvider', 'CPUExecutionProvider'])

    def resize_with_aspect_ratio(self, image, width=None, height=None, interpolation=None):
        """Resize `image` to fit within (width, height), preserving aspect ratio.

        A bound left as None imposes no constraint (defaults to the image's
        own dimension).  The previous implementation divided by the None
        defaults directly and crashed unless both bounds were supplied.

        Args:
            image: HxW or HxWxC array (OpenCV image).
            width: maximum output width, or None for unconstrained.
            height: maximum output height, or None for unconstrained.
            interpolation: cv2 interpolation flag; defaults to cv2.INTER_AREA.
                Resolved lazily so the class can be defined without touching
                cv2 attributes at import time.

        Returns:
            The resized image.
        """
        h, w = image.shape[:2]
        scale_w = (width if width is not None else w) / w
        scale_h = (height if height is not None else h) / h
        # The smaller scale guarantees both bounds are respected.
        scale = min(scale_h, scale_w)
        dim = (int(w * scale), int(h * scale))
        if interpolation is None:
            interpolation = cv2.INTER_AREA
        return cv2.resize(image, dim, interpolation=interpolation)

    def torch_style_softmax_numpy_impl(self, x, axis=-1):
        """Numerically stable softmax along `axis` (mirrors torch.softmax)."""
        # Subtracting the max prevents overflow in exp without changing the result.
        e_x = np.exp(x - np.max(x, axis=axis, keepdims=True))
        return e_x / np.sum(e_x, axis=axis, keepdims=True)

    def onnx_infer_data(self, x):
        """Run one forward pass and return the model's first output.

        Args:
            x: input batch fed to the graph input named 'input:0'.
        """
        start = time.perf_counter()
        a = self.model.run(None, {
            'input:0': x
        })[0]
        end = time.perf_counter()
        print("onnx cost: ", end - start)
        return a

    def get_nnunet_predict(self, x: np.ndarray, dims: list):
        """nnU-Net-style mirrored prediction: flip, infer, softmax, flip back.

        Args:
            x: NCHW input batch.
            dims: axes to mirror (e.g. [2], [3], or [3, 2]).
        """
        return np.flip(
            self.torch_style_softmax_numpy_impl(
                self.onnx_infer_data(np.flip(x, dims)), 1),
            dims)

    def get_nnunet_predict_without_softmax(self, x: np.ndarray, dims: list):
        """Same flip/infer/unflip augmentation as get_nnunet_predict, on raw logits."""
        return np.flip(self.onnx_infer_data(np.flip(x, dims)), dims)

    def cv_imread(self, file_path):
        """Read an image from a path that may contain non-ASCII characters.

        cv2.imread cannot handle such paths on Windows, so the file is read
        as raw bytes and decoded with cv2.imdecode instead.
        """
        return cv2.imdecode(np.fromfile(file_path, dtype=np.uint8), cv2.IMREAD_COLOR)

    def preprocess(self, img_pth):
        """Load an image file into a normalized NCHW float32 batch of size 1."""
        mat = self.cv_imread(img_pth)
        mat = self.resize_with_aspect_ratio(mat, 512, 512)
        mat = mat.transpose((2, 0, 1))  # HWC -> CHW
        mat = mat.astype(np.float32)
        # Per-channel z-score normalization; epsilon guards constant channels.
        for c in range(mat.shape[0]):
            mat[c] = (mat[c] - mat[c].mean()) / (mat[c].std() + 1e-8)
        return np.expand_dims(mat, 0)

    def postprocess(self, pred):
        """Collapse per-class scores (N,C,H,W) to a single 0/255 label map (H,W)."""
        labels = np.argmax(pred, 1)[0]
        # Scale class index to an 8-bit-visible mask (assumes binary segmentation).
        return labels * 255

    def infer_fast(self, img):
        """Segment `img` with 4-way flip TTA summed over raw logits.

        Skipping softmax/averaging is cheaper; argmax of the summed logits is
        used directly (this is the 'fast' trade-off the script intends).
        """
        batch = self.preprocess(img)
        pred = self.onnx_infer_data(batch)
        pred += self.get_nnunet_predict_without_softmax(batch, [3])
        pred += self.get_nnunet_predict_without_softmax(batch, [2])
        pred += self.get_nnunet_predict_without_softmax(batch, [3, 2])
        return self.postprocess(pred)

    def infer_quality(self, img):
        """Segment `img` averaging softmax probabilities over 4-way flip TTA."""
        batch = self.preprocess(img)
        pred = self.torch_style_softmax_numpy_impl(self.onnx_infer_data(batch), 1)
        pred += self.get_nnunet_predict(batch, [3])
        pred += self.get_nnunet_predict(batch, [2])
        pred += self.get_nnunet_predict(batch, [3, 2])
        pred /= 4.0
        return self.postprocess(pred)

def run_one(model, img, dir, mode: str):
    """Run one image through `model` and write the PNG mask into `dir`.

    Args:
        model: UnetOnnx-like object exposing infer_fast / infer_quality.
        img: path to the input image; the output keeps its basename.
        dir: existing output directory.
        mode: 'fast' (logit-sum TTA) or 'quality' (softmax-average TTA).

    Raises:
        ValueError: if `mode` is neither 'fast' nor 'quality'.  The previous
            version left `output` unbound in that case and died with an
            unhelpful NameError.
    """
    start_time = time.perf_counter()
    if mode == 'fast':
        output = model.infer_fast(img)
    elif mode == 'quality':
        output = model.infer_quality(img)
    else:
        raise ValueError(f"unknown mode {mode!r}, expected 'fast' or 'quality'")
    output_path = os.path.join(dir, os.path.basename(img))
    # imencode + tofile instead of cv2.imwrite so non-ASCII paths work on Windows.
    cv2.imencode('.png', output)[1].tofile(output_path)
    end_time = time.perf_counter()
    print(f"infer file:{os.path.basename(img)},total time cost:{end_time - start_time}")


if __name__ == '__main__':
    # CLI: segment one PNG image and write the mask into the output directory.
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', type=str, required=True, help="input_image")
    parser.add_argument('-o', type=str, required=True, help="output_dir")
    parser.add_argument('-m', type=str, required=True, help="fast or quality")
    parser.add_argument('-n', type=str, required=True, help="name of model")
    args = parser.parse_args()

    # Catches placeholder single-space arguments some callers pass.
    if args.i == ' ' or args.o == ' ' or args.n == ' ':
        print("input_image or output_dir or name_of_model NOT FOUND!")
        exit(-1)

    if not os.path.exists(args.i):
        print("input image not exist!")
        exit(-2)

    # NOTE(review): this checks args.n relative to the CWD, while UnetOnnx
    # resolves it relative to the script directory — confirm callers pass a
    # name that satisfies both.
    if not os.path.exists(args.n):
        print("model not exist!")
        exit(-2)

    if not os.path.basename(args.i).endswith('png'):
        print("input image is not png!")
        exit(-2)

    # Reject invalid modes up front instead of letting run_one fail later.
    if args.m not in ('fast', 'quality'):
        print("mode must be fast or quality!")
        exit(-2)

    try:
        os.makedirs(args.o, exist_ok=True)
    except Exception as e:
        print(e)
        exit(-3)

    # Build the inference session only after all validation passed, so a
    # missing model file produces the friendly message above instead of a
    # raw onnxruntime exception (previously the model was loaded first).
    model = UnetOnnx(args.n)

    try:
        run_one(model, args.i, args.o, args.m)
    except Exception as e:
        print(e)
        exit(-4)







