glenn-jocher pre-commit-ci[bot] committed on
Commit
f3085ac
1 Parent(s): b3eaf50

Enable ONNX `--half` FP16 inference (#6268)

Browse files

* Enable ONNX `--half` FP16 inference

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>

Files changed (3) hide show
  1. detect.py +1 -1
  2. tutorial.ipynb +1 -1
  3. val.py +2 -2
detect.py CHANGED
@@ -94,7 +94,7 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s)
94
  imgsz = check_img_size(imgsz, s=stride) # check image size
95
 
96
  # Half
97
- half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA
98
  if pt or jit:
99
  model.model.half() if half else model.model.float()
100
 
 
94
  imgsz = check_img_size(imgsz, s=stride) # check image size
95
 
96
  # Half
97
+ half &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 supported on limited backends with CUDA
98
  if pt or jit:
99
  model.model.half() if half else model.model.float()
100
 
tutorial.ipynb CHANGED
@@ -1099,4 +1099,4 @@
1099
  "outputs": []
1100
  }
1101
  ]
1102
- }
 
1099
  "outputs": []
1100
  }
1101
  ]
1102
+ }
val.py CHANGED
@@ -137,9 +137,9 @@ def run(data,
137
 
138
  # Load model
139
  model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data)
140
- stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
141
  imgsz = check_img_size(imgsz, s=stride) # check image size
142
- half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA
143
  if pt or jit:
144
  model.model.half() if half else model.model.float()
145
  elif engine:
 
137
 
138
  # Load model
139
  model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data)
140
+ stride, pt, jit, onnx, engine = model.stride, model.pt, model.jit, model.onnx, model.engine
141
  imgsz = check_img_size(imgsz, s=stride) # check image size
142
+ half &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 supported on limited backends with CUDA
143
  if pt or jit:
144
  model.model.half() if half else model.model.float()
145
  elif engine: