glenn-jocher committed
Commit 581dc30
1 parent: 554f782

Add ONNX inference providers (#5918)


* Add ONNX inference providers

Fix for https://github.com/ultralytics/yolov5/issues/5916

* Update common.py

Files changed (1)
  1. models/common.py +4 -2
models/common.py CHANGED
@@ -320,9 +320,11 @@ class DetectMultiBackend(nn.Module):
             net = cv2.dnn.readNetFromONNX(w)
         elif onnx:  # ONNX Runtime
             LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
-            check_requirements(('onnx', 'onnxruntime-gpu' if torch.cuda.is_available() else 'onnxruntime'))
+            cuda = torch.cuda.is_available()
+            check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
             import onnxruntime
-            session = onnxruntime.InferenceSession(w, None)
+            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
+            session = onnxruntime.InferenceSession(w, providers=providers)
         elif engine:  # TensorRT
             LOGGER.info(f'Loading {w} for TensorRT inference...')
             import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
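For reference, a minimal standalone sketch of the pattern this commit adds: explicitly selecting ONNX Runtime execution providers (required since onnxruntime 1.9 when a GPU build exposes more than one provider) and running a forward pass. The 'yolov5s.onnx' filename and the 1x3x640x640 input shape are illustrative assumptions, not values taken from this diff.

# Sketch: explicit ONNX Runtime execution providers, assuming a YOLOv5 ONNX export exists.
import numpy as np
import onnxruntime
import torch

cuda = torch.cuda.is_available()
providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
session = onnxruntime.InferenceSession('yolov5s.onnx', providers=providers)  # assumed model path

# Run inference on a dummy NCHW float32 image; real YOLOv5 inputs are normalized to [0, 1].
input_name = session.get_inputs()[0].name
im = np.zeros((1, 3, 640, 640), dtype=np.float32)  # assumed 640x640 export size
outputs = session.run(None, {input_name: im})
print([o.shape for o in outputs])

Listing CUDAExecutionProvider ahead of CPUExecutionProvider makes ONNX Runtime prefer the GPU and fall back to CPU where needed, so the same code path works on both GPU and CPU-only machines.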