from pathlib import Path
from typing import Optional

import cv2
import numpy as np
import onnxruntime as ort

class ONNXNode:
    """Lightweight view of an ONNX Runtime input/output node (name, type, shape)."""

    # Map ONNX tensor type strings to the matching numpy dtypes. Anything not
    # listed keeps the original behavior of defaulting to float32.
    _TYPE_MAP = {
        'tensor(float)': np.float32,
        'tensor(float16)': np.float16,
        'tensor(double)': np.float64,
        'tensor(int32)': np.int32,
        'tensor(int64)': np.int64,
        'tensor(uint8)': np.uint8,
        'tensor(bool)': np.bool_,
    }

    def __init__(self, node: ort.NodeArg):
        self.shape = node.shape
        self.name = node.name
        self.type = node.type
        # Was hard-coded to np.float32; derive the dtype from the node's
        # declared ONNX type instead, falling back to float32 for unmapped
        # types so float32 models behave exactly as before.
        self.numpy_type = self._TYPE_MAP.get(node.type, np.float32)

    def __str__(self):
        return 'name: {:>7},  type: {},  shape: {}'.format(self.name, self.type, self.shape)

# Preferred execution providers, tried in this order when the caller does not
# supply a list. NOTE(review): onnxruntime is expected to skip providers not
# present in the current build at session creation — confirm for your version.
_default_providers = ['OpenVINOExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider']
class ONNXDetectInference:
    """Object-detection inference wrapper around an ONNX Runtime session.

    Assumes a single-input / single-output model. The output buffer is
    allocated once at construction and reused across ``run`` calls via
    IO binding.
    """

    def __init__(self,
                 onnx_path: Path | str,
                 session_option: Optional[ort.SessionOptions] = None,
                 run_option: Optional[ort.RunOptions] = None,
                 providers: Optional[list[str]] = None,
                 device_type: Optional[str] = None,
                 device_id: int = 0
                 ):
        """Configure detection inference.

        :param onnx_path: path to the ONNX model file
        :param session_option: session options (see onnxruntime.SessionOptions);
            None means the default configuration
        :param run_option: run options (see onnxruntime.RunOptions); None means
            the default configuration
        :param providers: execution provider list; None selects the module
            default (xinfer.detect.inference._default_providers)
        :param device_type: device type for the pre-allocated output buffer;
            None infers it from ``providers``
        :param device_id: device id, defaults to 0
        :raises FileNotFoundError: if ``onnx_path`` does not exist

        ::

            Example 1:
            ONNXDetectInference('yolov10n.onnx')

            Example 2:
            ONNXDetectInference('yolov10n.onnx', None, None, ['CUDAExecutionProvider', 'CPUExecutionProvider'])

            Example 3:
            ONNXDetectInference('yolov10n.onnx', None, None, ['CUDAExecutionProvider'], 'cuda', 0)

        """
        if providers is None:
            providers = _default_providers
        onnx_path = Path(onnx_path).absolute()
        if not onnx_path.exists():
            raise FileNotFoundError(f'路径\'{onnx_path}\'不存在')
        if session_option is None:
            session_option = ort.SessionOptions()

        # str() keeps compatibility with onnxruntime versions that do not
        # accept os.PathLike model paths.
        self.session = ort.InferenceSession(str(onnx_path), session_option, providers=providers)
        self.run_option = run_option
        self.input_node = ONNXNode(self.session.get_inputs()[0])
        self.output_node = ONNXNode(self.session.get_outputs()[0])

        if device_type is None:
            # Infer the device type from the first requested provider that is
            # actually available in this onnxruntime build.
            available_providers = ort.get_available_providers()
            for provider in providers:
                if provider not in available_providers:
                    continue
                device_type = provider.replace('ExecutionProvider', '').lower()
                break
            # ort.get_device() reports e.g. 'CPU' or 'GPU'. Compare
            # case-insensitively (the original case-sensitive test always fell
            # through to 'cpu') and guard against device_type still being None
            # when no requested provider was available (the original would
            # raise TypeError on `None not in str`).
            if device_type is None or device_type not in ort.get_device().lower():
                device_type = 'cpu'

        # Pre-allocate the output buffer once; ``run`` reuses it every call.
        # NOTE(review): this requires a fully static output shape — a symbolic
        # (dynamic) dimension would make np.zeros fail here. Confirm the models
        # used have fixed output shapes.
        self.output_data = ort.OrtValue.ortvalue_from_numpy(
            np.zeros(shape=self.output_node.shape, dtype=self.output_node.numpy_type),
            device_type=device_type,
            device_id=device_id
        )
        self.io_binding = self.session.io_binding()
        # The binding must describe the device the buffer actually lives on.
        # The original hard-coded 'cpu'/0 here, which mismatched the buffer
        # whenever it was allocated on a non-cpu device.
        self.io_binding.bind_output(
            name=self.output_node.name,
            device_type=device_type,
            device_id=device_id,
            element_type=self.output_node.numpy_type,
            shape=self.output_node.shape,
            buffer_ptr=self.output_data.data_ptr()
        )

    def run(self, input_data: np.ndarray) -> np.ndarray:
        """Run one inference pass and return the raw model output.

        :param input_data: preprocessed input matching the model's input
            shape and dtype (lives on the CPU)
        :return: the output tensor copied back into a numpy array
        """
        # Re-bind the input on every call since the caller may pass a
        # different buffer each time. The temporary OrtValue wraps
        # ``input_data``'s memory, which stays alive for the duration of
        # the call.
        self.io_binding.bind_input(
            name=self.input_node.name,
            device_type='cpu',
            device_id=0,
            element_type=self.input_node.numpy_type,
            shape=self.input_node.shape,
            buffer_ptr=ort.OrtValue.ortvalue_from_numpy(input_data, 'cpu', 0).data_ptr()
        )
        # Forward the stored run options (the original accepted them in
        # __init__ but never used them).
        self.session.run_with_iobinding(self.io_binding, self.run_option)
        return self.output_data.numpy()

    def __del__(self):
        # __init__ may have raised before ``session`` was assigned (e.g. a
        # missing model file), so guard the attribute access to avoid a
        # spurious AttributeError during interpreter teardown.
        if hasattr(self, 'session'):
            del self.session

if __name__ == '__main__':
    # Smoke-test: preprocess a sample image and run one inference pass.
    from xinfer_yolo.detect.preprocess import preprocess

    project_root = Path(__file__).parent.parent.parent
    inference = ONNXDetectInference(project_root / 'onnx_model' / 'yolov10n.onnx')
    image = cv2.imread(str(project_root / 'tests' / 'images' / 'bus.jpg'))

    # Allocate the preprocess destination without the batch dimension, then
    # reshape to the full (batched) input shape expected by the model.
    dst = np.zeros(shape=inference.input_node.shape[1:], dtype=np.float32)
    preprocess(image, dst)
    dst = dst.reshape(inference.input_node.shape)

    # The original ran inference twice (once discarded, once for the print);
    # run it once and print that result.
    print(inference.run(dst))
