import onnxruntime
import tensorrt
class PredictBase(object):
    """Base helper for ONNX Runtime inference.

    Provides session creation plus convenience accessors for input/output
    node names and for building the ``input_feed`` dict expected by
    ``InferenceSession.run``.
    """

    def __init__(self):
        pass

    def get_onnx_session(self, model_dir, use_gpu):
        """Create an ONNX Runtime inference session.

        :param model_dir: path to the ONNX model file.
        :param use_gpu: if True, prefer the CUDA execution provider
            (ONNX Runtime falls back to CPU if CUDA is unavailable).
        :return: an ``onnxruntime.InferenceSession``.
        """
        if use_gpu:
            providers = [
                ('CUDAExecutionProvider', {"cudnn_conv_algo_search": "DEFAULT"}),
                'CPUExecutionProvider',
            ]
        else:
            providers = ['CPUExecutionProvider']

        return onnxruntime.InferenceSession(model_dir, None, providers=providers)

    def get_output_name(self, onnx_session):
        """Return the names of all output nodes of the session.

        :param onnx_session: an ``onnxruntime.InferenceSession``.
        :return: list of output node names, in session order.
        """
        return [node.name for node in onnx_session.get_outputs()]

    def get_input_name(self, onnx_session):
        """Return the names of all input nodes of the session.

        :param onnx_session: an ``onnxruntime.InferenceSession``.
        :return: list of input node names, in session order.
        """
        return [node.name for node in onnx_session.get_inputs()]

    def get_input_feed(self, input_name, image_numpy):
        """Map every input name to the same payload.

        :param input_name: iterable of input node names.
        :param image_numpy: the (numpy) array fed to every input.
        :return: dict suitable as ``input_feed`` for ``InferenceSession.run``.
        """
        return {name: image_numpy for name in input_name}

from .common import eval_batch, create_engine_context, get_io_bindings

class PredictBaseIxrt(object):
    """Base helper for inference through an IxRT (TensorRT-style) engine."""

    def __init__(self):
        # Engine state is populated later by set_ixrt_engine().
        self.logger = tensorrt.Logger(tensorrt.Logger.ERROR)
        self.inputs = None
        self.outputs = None
        self.allocations = None
        self.context = None
        self.engine = None

        # Raw device buffer bookkeeping (addresses and byte sizes).
        self.in_addr = None
        self.in_bytes = 0
        self.out_addr = None
        self.out_bytes = 0

    def set_ixrt_engine(self, model_dir):
        """Deserialize the engine at *model_dir* and create an execution context.

        :param model_dir: path to the serialized engine file.
        """
        print("Setting up IXRT engine with model:", model_dir)
        self.engine, self.context = create_engine_context(model_dir, self.logger)
        # NOTE(review): io bindings are intentionally not allocated here, so
        # self.inputs/self.outputs/self.allocations stay None until a caller
        # runs get_io_bindings -- confirm against call sites before relying
        # on them being set.


import tritonclient.http as httpclient
from tritonclient.utils import InferenceServerException

class PredictBaseTritonServer(object):
    """Base helper for inference via a Triton Inference Server over HTTP."""

    def __init__(self):
        # NOTE(review): a TensorRT logger looks unnecessary for an HTTP
        # Triton client; kept for backward compatibility -- confirm no
        # subclass relies on self.logger before removing.
        self.logger = tensorrt.Logger(tensorrt.Logger.ERROR)
        self.inputs = None
        self.outputs = None
        self.client = None

    def get_client(self, model_url, model_name):
        """Connect to a Triton server and verify *model_name* is ready.

        On success also stores the client on ``self.client`` and the model
        name on ``self.model_name``.

        :param model_url: host:port of the Triton HTTP endpoint.
        :param model_name: name of the model whose readiness is checked.
        :return: the connected client, or None on any failure.
        """
        try:
            self.client = httpclient.InferenceServerClient(
                url=model_url,
                verbose=False
            )
        except Exception as e:
            print(f"Failed to initialize Triton client: {e}")
            return None

        # Check that the requested model is loaded and ready to serve.
        if not self.client.is_model_ready(model_name):
            # Bug fix: the message previously hard-coded "detection" instead
            # of naming the model that failed the readiness check.
            print(f"Model {model_name} is not ready")
            return None

        self.model_name = model_name

        return self.client

    def get_inputs(self, input, name, type):
        """Wrap a numpy array as a single-element Triton input list.

        :param input: numpy array holding the request payload.
        :param name: Triton input tensor name.
        :param type: Triton datatype string (e.g. "FP32").
        :return: one-element list containing the populated ``InferInput``.
        """
        # ``input``/``type`` shadow builtins; names kept for caller
        # compatibility (keyword callers would break on a rename).
        infer_input = httpclient.InferInput(name, input.shape, type)
        infer_input.set_data_from_numpy(input)
        return [infer_input]
