"""ONNX inference engine."""
import numpy as np
import onnxruntime as ort
from typing import List, Union
from .base_engine import InferenceEngine

class ONNXEngine(InferenceEngine):
    """ONNX Runtime inference engine.

    Wraps an ``onnxruntime.InferenceSession`` behind the common
    ``InferenceEngine`` interface. Assumes a single-input model: the first
    input's name, shape, and dtype are cached at initialization time.
    """

    def _initialize(self):
        """Create the ORT session and cache input/output metadata.

        CUDA is preferred; onnxruntime automatically falls back to the next
        provider in the list (CPU) when CUDA is unavailable.
        """
        providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
        # self.model_path is presumably set by the InferenceEngine base
        # class before _initialize runs — TODO confirm against base_engine.
        self.session = ort.InferenceSession(self.model_path, providers=providers)

        # Cache I/O metadata once so infer() does not query the session per call.
        inputs = self.session.get_inputs()
        self.input_name = inputs[0].name
        self.output_names = [output.name for output in self.session.get_outputs()]

        # NOTE: the shape tuple may contain symbolic/dynamic dimensions
        # (strings or None) for models with variable input sizes.
        self.input_shape = tuple(inputs[0].shape)
        self.input_dtype = self._onnx_type_to_numpy(inputs[0].type)

    def _onnx_type_to_numpy(self, onnx_type: str) -> np.dtype:
        """Map an ONNX tensor type string (e.g. ``'tensor(float)'``) to a numpy dtype.

        Unknown type strings fall back to float32.
        """
        type_map = {
            'tensor(float)': np.float32,
            'tensor(double)': np.float64,
            'tensor(int32)': np.int32,
            'tensor(int64)': np.int64,
            'tensor(uint8)': np.uint8,
        }
        # Wrap in np.dtype so the return value actually matches the annotated
        # type: np.float32 & friends are scalar *types*, not dtype instances.
        return np.dtype(type_map.get(onnx_type, np.float32))

    def infer(self, input_data: np.ndarray) -> Union[np.ndarray, List[np.ndarray]]:
        """Run a forward pass.

        Args:
            input_data: Input tensor (any array-like); cast to the model's
                expected dtype before execution.

        Returns:
            A single ndarray for single-output models, otherwise a list of
            ndarrays in the model's declared output order.
        """
        # asarray accepts array-likes; copy=False skips the redundant copy
        # when the dtype already matches the model's expected input dtype.
        input_data = np.asarray(input_data).astype(self.input_dtype, copy=False)
        outputs = self.session.run(self.output_names, {self.input_name: input_data})
        return outputs if len(outputs) > 1 else outputs[0]

    def get_input_shape(self) -> tuple:
        """Return the cached input shape (may include dynamic dimensions)."""
        return self.input_shape

    def get_input_dtype(self) -> np.dtype:
        """Return the numpy dtype expected for the model input."""
        return self.input_dtype