xylieong, pre-commit-ci[bot] and glenn-jocher committed
Commit a3a652c
Parent: 541a5b7

Add OpenVINO metadata to export (#7947)

* Write .yaml file when exporting model to OpenVINO

Write a .yaml metadata file automatically when exporting a model to OpenVINO, so it can be used during inference (an example of the resulting file follows below).

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Update export.py

* Update export.py

* Load metadata on inference

* Update common.py

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
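
For reference, the resulting meta.yaml holds exactly the two keys written by export_openvino(): the maximum model stride and the class-name list. A representative file for a COCO-trained model (class list truncated, contents illustrative):

    names:
    - person
    - bicycle
    - car
    stride: 32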

Files changed (2):
  1. export.py (+6 -3)
  2. models/common.py (+18 -9)
export.py CHANGED

@@ -54,6 +54,7 @@ from pathlib import Path
 
 import pandas as pd
 import torch
+import yaml
 from torch.utils.mobile_optimizer import optimize_for_mobile
 
 FILE = Path(__file__).resolve()
@@ -168,7 +169,7 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst
         LOGGER.info(f'{prefix} export failure: {e}')
 
 
-def export_openvino(file, half, prefix=colorstr('OpenVINO:')):
+def export_openvino(model, file, half, prefix=colorstr('OpenVINO:')):
     # YOLOv5 OpenVINO export
     try:
         check_requirements(('openvino-dev',))  # requires openvino-dev: https://pypi.org/project/openvino-dev/
@@ -178,7 +179,9 @@ def export_openvino(file, half, prefix=colorstr('OpenVINO:')):
         f = str(file).replace('.pt', f'_openvino_model{os.sep}')
 
         cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}"
-        subprocess.check_output(cmd.split())
+        subprocess.check_output(cmd.split())  # export
+        with open(Path(f) / 'meta.yaml', 'w') as g:
+            yaml.dump({'stride': int(max(model.stride)), 'names': model.names}, g)  # add metadata.yaml
 
         LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
         return f
@@ -520,7 +523,7 @@ def run(
     if onnx or xml:  # OpenVINO requires ONNX
         f[2] = export_onnx(model, im, file, opset, train, dynamic, simplify)
     if xml:  # OpenVINO
-        f[3] = export_openvino(file, half)
+        f[3] = export_openvino(model, file, half)
     if coreml:
         _, f[4] = export_coreml(model, im, file, int8, half)
 
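With this change, an OpenVINO export writes meta.yaml next to the converted network (the ONNX intermediate is produced automatically, since OpenVINO conversion consumes it). A typical invocation (weights name illustrative):

    python export.py --weights yolov5s.pt --include openvino
    # -> yolov5s_openvino_model/ containing yolov5s.xml, yolov5s.bin and meta.yaml
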
models/common.py CHANGED

@@ -326,9 +326,6 @@ class DetectMultiBackend(nn.Module):
         stride, names = 32, [f'class{i}' for i in range(1000)]  # assign defaults
         w = attempt_download(w)  # download if not local
         fp16 &= (pt or jit or onnx or engine) and device.type != 'cpu'  # FP16
-        if data:  # data.yaml path (optional)
-            with open(data, errors='ignore') as f:
-                names = yaml.safe_load(f)['names']  # class names
 
         if pt:  # PyTorch
             model = attempt_load(weights if isinstance(weights, list) else w, map_location=device)
@@ -367,7 +364,8 @@ class DetectMultiBackend(nn.Module):
             w = next(Path(w).glob('*.xml'))  # get *.xml file from *_openvino_model dir
             network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))
             executable_network = ie.compile_model(model=network, device_name="CPU")
-            self.output_layer = next(iter(executable_network.outputs))
+            output_layer = next(iter(executable_network.outputs))
+            self._load_metadata(w.parent / 'meta.yaml')  # load metadata
         elif engine:  # TensorRT
             LOGGER.info(f'Loading {w} for TensorRT inference...')
             import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
@@ -433,7 +431,11 @@ class DetectMultiBackend(nn.Module):
             output_details = interpreter.get_output_details()  # outputs
         elif tfjs:
             raise Exception('ERROR: YOLOv5 TF.js inference is not supported')
+
         self.__dict__.update(locals())  # assign all variables to self
+        if not hasattr(self, 'names') and data:  # assign class names (optional)
+            with open(data, errors='ignore') as f:
+                names = yaml.safe_load(f)['names']
 
     def forward(self, im, augment=False, visualize=False, val=False):
         # YOLOv5 MultiBackend inference
@@ -493,13 +495,20 @@ class DetectMultiBackend(nn.Module):
             y = torch.tensor(y, device=self.device)
         return (y, []) if val else y
 
+    def _load_metadata(self, f='path/to/meta.yaml'):
+        # Load metadata from meta.yaml if it exists
+        if Path(f).is_file():
+            with open(f, errors='ignore') as f:
+                for k, v in yaml.safe_load(f).items():
+                    setattr(self, k, v)  # assign stride, names
+
     def warmup(self, imgsz=(1, 3, 640, 640)):
         # Warmup model by running inference once
-        if any((self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb)):  # warmup types
-            if self.device.type != 'cpu':  # only warmup GPU models
-                im = torch.zeros(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device)  # input
-                for _ in range(2 if self.jit else 1):  #
-                    self.forward(im)  # warmup
+        warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb
+        if any(warmup_types) and self.device.type != 'cpu':
+            im = torch.zeros(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device)  # input
+            for _ in range(2 if self.jit else 1):  #
+                self.forward(im)  # warmup
 
     @staticmethod
     def model_type(p='path/to/model.pt'):
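
On the inference side, DetectMultiBackend now restores stride and names from meta.yaml via _load_metadata(), so a --data yaml is no longer required just to recover class labels. A minimal sketch of the effect (paths illustrative):

    from models.common import DetectMultiBackend

    model = DetectMultiBackend('yolov5s_openvino_model')  # locates *.xml/*.bin, then meta.yaml
    print(model.stride, model.names)  # restored from meta.yaml rather than the class0..class999 defaults

Because detect.py and val.py build on DetectMultiBackend, pointing them at the *_openvino_model directory picks up the same metadata automatically.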