glenn-jocher
committed on
Add ONNX export metadata (#7353)
Browse files- export.py +7 -1
- models/common.py +3 -0
export.py
CHANGED
@@ -140,7 +140,13 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst
|
|
140 |
# Checks
|
141 |
model_onnx = onnx.load(f) # load onnx model
|
142 |
onnx.checker.check_model(model_onnx) # check onnx model
|
143 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
144 |
|
145 |
# Simplify
|
146 |
if simplify:
|
|
|
140 |
# Checks
|
141 |
model_onnx = onnx.load(f) # load onnx model
|
142 |
onnx.checker.check_model(model_onnx) # check onnx model
|
143 |
+
|
144 |
+
# Metadata
|
145 |
+
d = {'stride': int(max(model.stride)), 'names': model.names}
|
146 |
+
for k, v in d.items():
|
147 |
+
meta = model_onnx.metadata_props.add()
|
148 |
+
meta.key, meta.value = k, str(v)
|
149 |
+
onnx.save(model_onnx, f)
|
150 |
|
151 |
# Simplify
|
152 |
if simplify:
|
models/common.py
CHANGED
@@ -328,6 +328,9 @@ class DetectMultiBackend(nn.Module):
|
|
328 |
import onnxruntime
|
329 |
providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
|
330 |
session = onnxruntime.InferenceSession(w, providers=providers)
|
|
|
|
|
|
|
331 |
elif xml: # OpenVINO
|
332 |
LOGGER.info(f'Loading {w} for OpenVINO inference...')
|
333 |
check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/
|
|
|
328 |
import onnxruntime
|
329 |
providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
|
330 |
session = onnxruntime.InferenceSession(w, providers=providers)
|
331 |
+
meta = session.get_modelmeta().custom_metadata_map # metadata
|
332 |
+
if 'stride' in meta:
|
333 |
+
stride, names = int(meta['stride']), eval(meta['names'])
|
334 |
elif xml: # OpenVINO
|
335 |
LOGGER.info(f'Loading {w} for OpenVINO inference...')
|
336 |
check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/
|