glenn-jocher committed
Commit 5bd6a97
Parent: 7cad659

Global export format sort (#6182)

* Global export sort

* Cleanup

Files changed (4)
  1. detect.py +3 -3
  2. export.py +74 -74
  3. models/common.py +40 -40
  4. val.py +3 -3
detect.py CHANGED
@@ -15,13 +15,13 @@ Usage - formats:
     $ python path/to/detect.py --weights yolov5s.pt              # PyTorch
                                          yolov5s.torchscript     # TorchScript
                                          yolov5s.onnx            # ONNX Runtime or OpenCV DNN with --dnn
-                                         yolov5s.mlmodel         # CoreML (under development)
                                          yolov5s.xml             # OpenVINO
+                                         yolov5s.engine          # TensorRT
+                                         yolov5s.mlmodel         # CoreML (under development)
                                          yolov5s_saved_model     # TensorFlow SavedModel
-                                         yolov5s.pb              # TensorFlow protobuf
+                                         yolov5s.pb              # TensorFlow GraphDef
                                          yolov5s.tflite          # TensorFlow Lite
                                          yolov5s_edgetpu.tflite  # TensorFlow Edge TPU
-                                         yolov5s.engine          # TensorRT
 """
 
 import argparse
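The reordered inference listing above follows the same global backend order used throughout this commit (PyTorch, TorchScript, ONNX, OpenVINO, TensorRT, CoreML, then the TensorFlow family). A minimal Python sketch of the equivalent call, assuming detect.run() exposes the same weights, source and dnn options as the CLI flags in the docstring:

# Minimal sketch (not part of this diff): run detect.py programmatically on an exported model.
import detect  # assumes the YOLOv5 repository root is on sys.path

detect.run(weights='yolov5s.onnx',  # any format from the listing above; the backend is picked from the suffix
           source='data/images',    # default image directory used by detect.py
           dnn=False)               # set True to run *.onnx through OpenCV DNN instead of ONNX Runtime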
export.py CHANGED
@@ -2,19 +2,19 @@
 """
 Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit
 
-Format | Example | `--include ...` argument
+Format | `export.py --include` | Model
 --- | --- | ---
-PyTorch | yolov5s.pt | -
-TorchScript | yolov5s.torchscript | `torchscript`
-ONNX | yolov5s.onnx | `onnx`
-CoreML | yolov5s.mlmodel | `coreml`
-OpenVINO | yolov5s_openvino_model/ | `openvino`
-TensorFlow SavedModel | yolov5s_saved_model/ | `saved_model`
-TensorFlow GraphDef | yolov5s.pb | `pb`
-TensorFlow Lite | yolov5s.tflite | `tflite`
-TensorFlow Edge TPU | yolov5s_edgetpu.tflite | `edgetpu`
-TensorFlow.js | yolov5s_web_model/ | `tfjs`
-TensorRT | yolov5s.engine | `engine`
+PyTorch | - | yolov5s.pt
+TorchScript | `torchscript` | yolov5s.torchscript
+ONNX | `onnx` | yolov5s.onnx
+OpenVINO | `openvino` | yolov5s_openvino_model/
+TensorRT | `engine` | yolov5s.engine
+CoreML | `coreml` | yolov5s.mlmodel
+TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/
+TensorFlow GraphDef | `pb` | yolov5s.pb
+TensorFlow Lite | `tflite` | yolov5s.tflite
+TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite
+TensorFlow.js | `tfjs` | yolov5s_web_model/
 
 Usage:
     $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml openvino saved_model tflite tfjs
@@ -23,13 +23,13 @@ Inference:
     $ python path/to/detect.py --weights yolov5s.pt              # PyTorch
                                          yolov5s.torchscript     # TorchScript
                                          yolov5s.onnx            # ONNX Runtime or OpenCV DNN with --dnn
-                                         yolov5s.mlmodel         # CoreML (under development)
                                          yolov5s.xml             # OpenVINO
+                                         yolov5s.engine          # TensorRT
+                                         yolov5s.mlmodel         # CoreML (under development)
                                          yolov5s_saved_model     # TensorFlow SavedModel
-                                         yolov5s.pb              # TensorFlow protobuf
+                                         yolov5s.pb              # TensorFlow GraphDef
                                          yolov5s.tflite          # TensorFlow Lite
                                          yolov5s_edgetpu.tflite  # TensorFlow Edge TPU
-                                         yolov5s.engine          # TensorRT
 
 TensorFlow.js:
     $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
@@ -126,6 +126,23 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst
         LOGGER.info(f'{prefix} export failure: {e}')
 
 
+def export_openvino(model, im, file, prefix=colorstr('OpenVINO:')):
+    # YOLOv5 OpenVINO export
+    try:
+        check_requirements(('openvino-dev',))  # requires openvino-dev: https://pypi.org/project/openvino-dev/
+        import openvino.inference_engine as ie
+
+        LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
+        f = str(file).replace('.pt', '_openvino_model' + os.sep)
+
+        cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f}"
+        subprocess.check_output(cmd, shell=True)
+
+        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
+    except Exception as e:
+        LOGGER.info(f'\n{prefix} export failure: {e}')
+
+
 def export_coreml(model, im, file, prefix=colorstr('CoreML:')):
     # YOLOv5 CoreML export
     ct_model = None
@@ -148,27 +165,57 @@ def export_coreml(model, im, file, prefix=colorstr('CoreML:')):
     return ct_model
 
 
-def export_openvino(model, im, file, prefix=colorstr('OpenVINO:')):
-    # YOLOv5 OpenVINO export
+def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')):
+    # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt
     try:
-        check_requirements(('openvino-dev',))  # requires openvino-dev: https://pypi.org/project/openvino-dev/
-        import openvino.inference_engine as ie
+        check_requirements(('tensorrt',))
+        import tensorrt as trt
 
-        LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
-        f = str(file).replace('.pt', '_openvino_model' + os.sep)
+        opset = (12, 13)[trt.__version__[0] == '8']  # test on TensorRT 7.x and 8.x
+        export_onnx(model, im, file, opset, train, False, simplify)
+        onnx = file.with_suffix('.onnx')
+        assert onnx.exists(), f'failed to export ONNX file: {onnx}'
 
-        cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f}"
-        subprocess.check_output(cmd, shell=True)
+        LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
+        f = file.with_suffix('.engine')  # TensorRT engine file
+        logger = trt.Logger(trt.Logger.INFO)
+        if verbose:
+            logger.min_severity = trt.Logger.Severity.VERBOSE
+
+        builder = trt.Builder(logger)
+        config = builder.create_builder_config()
+        config.max_workspace_size = workspace * 1 << 30
+
+        flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
+        network = builder.create_network(flag)
+        parser = trt.OnnxParser(network, logger)
+        if not parser.parse_from_file(str(onnx)):
+            raise RuntimeError(f'failed to load ONNX file: {onnx}')
+
+        inputs = [network.get_input(i) for i in range(network.num_inputs)]
+        outputs = [network.get_output(i) for i in range(network.num_outputs)]
+        LOGGER.info(f'{prefix} Network Description:')
+        for inp in inputs:
+            LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}')
+        for out in outputs:
+            LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}')
 
+        half &= builder.platform_has_fast_fp16
+        LOGGER.info(f'{prefix} building FP{16 if half else 32} engine in {f}')
+        if half:
+            config.set_flag(trt.BuilderFlag.FP16)
+        with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
+            t.write(engine.serialize())
         LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
+
     except Exception as e:
         LOGGER.info(f'\n{prefix} export failure: {e}')
 
 
 def export_saved_model(model, im, file, dynamic,
                        tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45,
-                       conf_thres=0.25, prefix=colorstr('TensorFlow saved_model:')):
-    # YOLOv5 TensorFlow saved_model export
+                       conf_thres=0.25, prefix=colorstr('TensorFlow SavedModel:')):
+    # YOLOv5 TensorFlow SavedModel export
     keras_model = None
     try:
         import tensorflow as tf
@@ -304,53 +351,6 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')):
         LOGGER.info(f'\n{prefix} export failure: {e}')
 
 
-def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')):
-    # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt
-    try:
-        check_requirements(('tensorrt',))
-        import tensorrt as trt
-
-        opset = (12, 13)[trt.__version__[0] == '8']  # test on TensorRT 7.x and 8.x
-        export_onnx(model, im, file, opset, train, False, simplify)
-        onnx = file.with_suffix('.onnx')
-        assert onnx.exists(), f'failed to export ONNX file: {onnx}'
-
-        LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
-        f = file.with_suffix('.engine')  # TensorRT engine file
-        logger = trt.Logger(trt.Logger.INFO)
-        if verbose:
-            logger.min_severity = trt.Logger.Severity.VERBOSE
-
-        builder = trt.Builder(logger)
-        config = builder.create_builder_config()
-        config.max_workspace_size = workspace * 1 << 30
-
-        flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
-        network = builder.create_network(flag)
-        parser = trt.OnnxParser(network, logger)
-        if not parser.parse_from_file(str(onnx)):
-            raise RuntimeError(f'failed to load ONNX file: {onnx}')
-
-        inputs = [network.get_input(i) for i in range(network.num_inputs)]
-        outputs = [network.get_output(i) for i in range(network.num_outputs)]
-        LOGGER.info(f'{prefix} Network Description:')
-        for inp in inputs:
-            LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}')
-        for out in outputs:
-            LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}')
-
-        half &= builder.platform_has_fast_fp16
-        LOGGER.info(f'{prefix} building FP{16 if half else 32} engine in {f}')
-        if half:
-            config.set_flag(trt.BuilderFlag.FP16)
-        with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
-            t.write(engine.serialize())
-        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-
-    except Exception as e:
-        LOGGER.info(f'\n{prefix} export failure: {e}')
-
-
 @torch.no_grad()
 def run(data=ROOT / 'data/coco128.yaml',  # 'dataset.yaml path'
         weights=ROOT / 'yolov5s.pt',  # weights path
@@ -417,12 +417,12 @@ def run(data=ROOT / 'data/coco128.yaml',  # 'dataset.yaml path'
         export_torchscript(model, im, file, optimize)
     if ('onnx' in include) or ('openvino' in include):  # OpenVINO requires ONNX
         export_onnx(model, im, file, opset, train, dynamic, simplify)
+    if 'openvino' in include:
+        export_openvino(model, im, file)
     if 'engine' in include:
         export_engine(model, im, file, train, half, simplify, workspace, verbose)
     if 'coreml' in include:
         export_coreml(model, im, file)
-    if 'openvino' in include:
-        export_openvino(model, im, file)
 
     # TensorFlow Exports
     if any(tf_exports):
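Note that the reordered dispatch in run() keeps the ONNX step ahead of OpenVINO and TensorRT, since both of those exporters consume the intermediate *.onnx file (the `mo` call in export_openvino and the OnnxParser in export_engine). A minimal sketch of requesting those formats in one call, assuming export.run() accepts the same weights and include arguments as the CLI usage in the docstring:

# Minimal sketch (not part of this diff): export several formats in one call.
import export  # assumes the YOLOv5 repository root is on sys.path

export.run(weights='yolov5s.pt',
           include=('onnx', 'openvino', 'engine'))  # run() dispatches in the global format order,
                                                    # yielding yolov5s.onnx, yolov5s_openvino_model/ and yolov5s.engine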
models/common.py CHANGED
@@ -316,17 +316,6 @@ class DetectMultiBackend(nn.Module):
             if extra_files['config.txt']:
                 d = json.loads(extra_files['config.txt'])  # extra_files dict
                 stride, names = int(d['stride']), d['names']
-        elif coreml:  # CoreML
-            LOGGER.info(f'Loading {w} for CoreML inference...')
-            import coremltools as ct
-            model = ct.models.MLModel(w)
-        elif xml:  # OpenVINO
-            LOGGER.info(f'Loading {w} for OpenVINO inference...')
-            check_requirements(('openvino-dev',))  # requires openvino-dev: https://pypi.org/project/openvino-dev/
-            import openvino.inference_engine as ie
-            core = ie.IECore()
-            network = core.read_network(model=w, weights=Path(w).with_suffix('.bin'))  # *.xml, *.bin paths
-            executable_network = core.load_network(network, device_name='CPU', num_requests=1)
         elif dnn:  # ONNX OpenCV DNN
             LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')
             check_requirements(('opencv-python>=4.5.4',))
@@ -338,6 +327,13 @@ class DetectMultiBackend(nn.Module):
             import onnxruntime
             providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
             session = onnxruntime.InferenceSession(w, providers=providers)
+        elif xml:  # OpenVINO
+            LOGGER.info(f'Loading {w} for OpenVINO inference...')
+            check_requirements(('openvino-dev',))  # requires openvino-dev: https://pypi.org/project/openvino-dev/
+            import openvino.inference_engine as ie
+            core = ie.IECore()
+            network = core.read_network(model=w, weights=Path(w).with_suffix('.bin'))  # *.xml, *.bin paths
+            executable_network = core.load_network(network, device_name='CPU', num_requests=1)
         elif engine:  # TensorRT
             LOGGER.info(f'Loading {w} for TensorRT inference...')
             import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
@@ -356,9 +352,17 @@ class DetectMultiBackend(nn.Module):
             binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())
             context = model.create_execution_context()
             batch_size = bindings['images'].shape[0]
-        else:  # TensorFlow (TFLite, pb, saved_model)
-            if pb:  # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
-                LOGGER.info(f'Loading {w} for TensorFlow *.pb inference...')
+        elif coreml:  # CoreML
+            LOGGER.info(f'Loading {w} for CoreML inference...')
+            import coremltools as ct
+            model = ct.models.MLModel(w)
+        else:  # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
+            if saved_model:  # SavedModel
+                LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')
+                import tensorflow as tf
+                model = tf.keras.models.load_model(w)
+            elif pb:  # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
+                LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')
                 import tensorflow as tf
 
                 def wrap_frozen_graph(gd, inputs, outputs):
@@ -369,19 +373,15 @@ class DetectMultiBackend(nn.Module):
                 graph_def = tf.Graph().as_graph_def()
                 graph_def.ParseFromString(open(w, 'rb').read())
                 frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0")
-            elif saved_model:
-                LOGGER.info(f'Loading {w} for TensorFlow saved_model inference...')
-                import tensorflow as tf
-                model = tf.keras.models.load_model(w)
             elif tflite:  # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
-                if 'edgetpu' in w.lower():
+                if 'edgetpu' in w.lower():  # Edge TPU
                     LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')
                     import tflite_runtime.interpreter as tfli
                     delegate = {'Linux': 'libedgetpu.so.1',  # install https://coral.ai/software/#edgetpu-runtime
                                 'Darwin': 'libedgetpu.1.dylib',
                                 'Windows': 'edgetpu.dll'}[platform.system()]
                     interpreter = tfli.Interpreter(model_path=w, experimental_delegates=[tfli.load_delegate(delegate)])
-                else:
+                else:  # Lite
                     LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
                     import tensorflow as tf
                     interpreter = tf.lite.Interpreter(model_path=w)  # load TFLite model
@@ -396,21 +396,13 @@ class DetectMultiBackend(nn.Module):
         if self.pt or self.jit:  # PyTorch
             y = self.model(im) if self.jit else self.model(im, augment=augment, visualize=visualize)
             return y if val else y[0]
-        elif self.coreml:  # CoreML
-            im = im.permute(0, 2, 3, 1).cpu().numpy()  # torch BCHW to numpy BHWC shape(1,320,192,3)
-            im = Image.fromarray((im[0] * 255).astype('uint8'))
-            # im = im.resize((192, 320), Image.ANTIALIAS)
-            y = self.model.predict({'image': im})  # coordinates are xywh normalized
-            box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]])  # xyxy pixels
-            conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)
-            y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)
-        elif self.onnx:  # ONNX
+        elif self.dnn:  # ONNX OpenCV DNN
             im = im.cpu().numpy()  # torch to numpy
-            if self.dnn:  # ONNX OpenCV DNN
-                self.net.setInput(im)
-                y = self.net.forward()
-            else:  # ONNX Runtime
-                y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0]
+            self.net.setInput(im)
+            y = self.net.forward()
+        elif self.onnx:  # ONNX Runtime
+            im = im.cpu().numpy()  # torch to numpy
+            y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0]
         elif self.xml:  # OpenVINO
             im = im.cpu().numpy()  # FP32
             desc = self.ie.TensorDesc(precision='FP32', dims=im.shape, layout='NCHW')  # Tensor Description
@@ -423,13 +415,21 @@ class DetectMultiBackend(nn.Module):
             self.binding_addrs['images'] = int(im.data_ptr())
             self.context.execute_v2(list(self.binding_addrs.values()))
             y = self.bindings['output'].data
-        else:  # TensorFlow model (TFLite, pb, saved_model)
+        elif self.coreml:  # CoreML
             im = im.permute(0, 2, 3, 1).cpu().numpy()  # torch BCHW to numpy BHWC shape(1,320,192,3)
-            if self.pb:
-                y = self.frozen_func(x=self.tf.constant(im)).numpy()
-            elif self.saved_model:
+            im = Image.fromarray((im[0] * 255).astype('uint8'))
+            # im = im.resize((192, 320), Image.ANTIALIAS)
+            y = self.model.predict({'image': im})  # coordinates are xywh normalized
+            box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]])  # xyxy pixels
+            conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)
+            y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)
+        else:  # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
+            im = im.permute(0, 2, 3, 1).cpu().numpy()  # torch BCHW to numpy BHWC shape(1,320,192,3)
+            if self.saved_model:  # SavedModel
                 y = self.model(im, training=False).numpy()
-            elif self.tflite:
+            elif self.pb:  # GraphDef
+                y = self.frozen_func(x=self.tf.constant(im)).numpy()
+            elif self.tflite:  # Lite
                 input, output = self.input_details[0], self.output_details[0]
                 int8 = input['dtype'] == np.uint8  # is TFLite quantized uint8 model
                 if int8:
@@ -451,7 +451,7 @@ class DetectMultiBackend(nn.Module):
 
     def warmup(self, imgsz=(1, 3, 640, 640), half=False):
         # Warmup model by running inference once
-        if self.pt or self.engine or self.onnx:  # warmup types
+        if self.pt or self.jit or self.onnx or self.engine:  # warmup types
            if isinstance(self.device, torch.device) and self.device.type != 'cpu':  # only warmup GPU models
                im = torch.zeros(*imgsz).to(self.device).type(torch.half if half else torch.float)  # input image
                self.forward(im)  # warmup
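With the branches above reordered, DetectMultiBackend selects its backend from the weights suffix in the same global order, and warmup() now also covers TorchScript (self.jit) models. A minimal usage sketch, assuming the constructor accepts a weights path as it does elsewhere in the repository:

# Minimal usage sketch (not part of this diff): the same call works for any exported format.
import torch
from models.common import DetectMultiBackend

model = DetectMultiBackend('yolov5s.onnx')  # suffix picks the branch; .engine, .xml, .tflite, ... dispatch likewise
model.warmup(imgsz=(1, 3, 640, 640))        # no-op on CPU; warms up pt/jit/onnx/engine models on GPU
im = torch.zeros(1, 3, 640, 640)            # dummy BCHW input
y = model(im)                               # forward() routes to the backend chosen at load time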
val.py CHANGED
@@ -9,13 +9,13 @@ Usage - formats:
     $ python path/to/val.py --weights yolov5s.pt              # PyTorch
                                       yolov5s.torchscript     # TorchScript
                                       yolov5s.onnx            # ONNX Runtime or OpenCV DNN with --dnn
-                                      yolov5s.mlmodel         # CoreML (under development)
                                       yolov5s.xml             # OpenVINO
+                                      yolov5s.engine          # TensorRT
+                                      yolov5s.mlmodel         # CoreML (under development)
                                       yolov5s_saved_model     # TensorFlow SavedModel
-                                      yolov5s.pb              # TensorFlow protobuf
+                                      yolov5s.pb              # TensorFlow GraphDef
                                       yolov5s.tflite          # TensorFlow Lite
                                       yolov5s_edgetpu.tflite  # TensorFlow Edge TPU
-                                      yolov5s.engine          # TensorRT
 """
 
 import argparse