glenn-jocher committed on
Commit
df7988d
1 Parent(s): 8cab44e

onnx_export.py

Browse files
.github/workflows/greetings.yml CHANGED
@@ -10,7 +10,7 @@ jobs:
10
  with:
11
  repo-token: ${{ secrets.GITHUB_TOKEN }}
12
  pr-message: 'Hello @${{ github.actor }}, thank you for submitting a PR! We will respond as soon as possible.'
13
- issue-message: >
14
  Hello @${{ github.actor }}, thank you for your interest in our work! Please visit our [Custom Training Tutorial](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) to get started, and see our [Google Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/tutorial.ipynb), [Docker Image](https://hub.docker.com/r/ultralytics/yolov5), and [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) for example environments.
15
 
16
  If this is a bug report, please provide screenshots and **minimum viable code to reproduce your issue**, otherwise we can not help you.
 
10
  with:
11
  repo-token: ${{ secrets.GITHUB_TOKEN }}
12
  pr-message: 'Hello @${{ github.actor }}, thank you for submitting a PR! We will respond as soon as possible.'
13
+ issue-message: |
14
  Hello @${{ github.actor }}, thank you for your interest in our work! Please visit our [Custom Training Tutorial](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) to get started, and see our [Google Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/tutorial.ipynb), [Docker Image](https://hub.docker.com/r/ultralytics/yolov5), and [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) for example environments.
15
 
16
  If this is a bug report, please provide screenshots and **minimum viable code to reproduce your issue**, otherwise we can not help you.
README.md CHANGED
@@ -108,4 +108,4 @@ To access an up-to-date working environment (with all dependencies including CUD
108
 
109
  ## Contact
110
 
111
- **Issues should be raised directly in the repository.** For business inquiries or professional support requests please visit us at https://www.ultralytics.com.
 
108
 
109
  ## Contact
110
 
111
+ **Issues should be raised directly in the repository.** For business inquiries or professional support requests please visit https://www.ultralytics.com or email Glenn Jocher at glenn.jocher@ultralytics.com.
detect.py CHANGED
@@ -7,12 +7,12 @@ ONNX_EXPORT = False
7
 
8
 
9
  def detect(save_img=False):
10
- imgsz = (320, 192) if ONNX_EXPORT else opt.img_size # (320, 192) or (416, 256) or (608, 352) for (height, width)
11
- out, source, weights, half, view_img, save_txt = opt.output, opt.source, opt.weights, opt.half, opt.view_img, opt.save_txt
12
  webcam = source == '0' or source.startswith('rtsp') or source.startswith('http') or source.endswith('.txt')
13
 
14
  # Initialize
15
- device = torch_utils.select_device(device='cpu' if ONNX_EXPORT else opt.device)
16
  if os.path.exists(out):
17
  shutil.rmtree(out) # delete output folder
18
  os.makedirs(out) # make new output folder
@@ -35,20 +35,6 @@ def detect(save_img=False):
35
  # Fuse Conv2d + BatchNorm2d layers
36
  # model.fuse()
37
 
38
- # Export mode
39
- if ONNX_EXPORT:
40
- model.fuse()
41
- img = torch.zeros((1, 3) + imgsz) # (1, 3, 320, 192)
42
- f = opt.weights.replace(opt.weights.split('.')[-1], 'onnx') # *.onnx filename
43
- torch.onnx.export(model, img, f, verbose=False, opset_version=11)
44
-
45
- # Validate exported model
46
- import onnx
47
- model = onnx.load(f) # Load the ONNX model
48
- onnx.checker.check_model(model) # Check that the IR is well formed
49
- print(onnx.helper.printable_graph(model.graph)) # Print a human readable representation of the graph
50
- return
51
-
52
  # Half precision
53
  half = half and device.type != 'cpu' # half precision only supported on CUDA
54
  if half:
 
7
 
8
 
9
  def detect(save_img=False):
10
+ out, source, weights, half, view_img, save_txt, imgsz = \
11
+ opt.output, opt.source, opt.weights, opt.half, opt.view_img, opt.save_txt, opt.img_size
12
  webcam = source == '0' or source.startswith('rtsp') or source.startswith('http') or source.endswith('.txt')
13
 
14
  # Initialize
15
+ device = torch_utils.select_device(opt.device)
16
  if os.path.exists(out):
17
  shutil.rmtree(out) # delete output folder
18
  os.makedirs(out) # make new output folder
 
35
  # Fuse Conv2d + BatchNorm2d layers
36
  # model.fuse()
37
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
  # Half precision
39
  half = half and device.type != 'cpu' # half precision only supported on CUDA
40
  if half:
models/onnx_export.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+
3
+ import onnx
4
+
5
+ from models.common import *
6
+
7
+ if __name__ == '__main__':
8
+ parser = argparse.ArgumentParser()
9
+ parser.add_argument('--weights', default='../weights/yolov5s.pt', help='model path RELATIVE to ./models/')
10
+ parser.add_argument('--img-size', default=640, help='inference size (pixels)')
11
+ parser.add_argument('--batch-size', default=1, help='batch size')
12
+ opt = parser.parse_args()
13
+
14
+ # Parameters
15
+ f = opt.weights.replace('.pt', '.onnx') # onnx filename
16
+ img = torch.zeros((opt.batch_size, 3, opt.img_size, opt.img_size)) # image size, (1, 3, 320, 192) iDetection
17
+
18
+ # Load pytorch model
19
+ google_utils.attempt_download(opt.weights)
20
+ model = torch.load(opt.weights)['model']
21
+ model.eval()
22
+ # model.fuse() # optionally fuse Conv2d + BatchNorm2d layers TODO
23
+
24
+ # Export to onnx
25
+ model.model[-1].export = True # set Detect() layer export=True
26
+ torch.onnx.export(model, img, f, verbose=False, opset_version=11)
27
+
28
+ # Check onnx model
29
+ model = onnx.load(f) # load onnx model
30
+ onnx.checker.check_model(model) # check onnx model
31
+ print(onnx.helper.printable_graph(model.graph)) # print a human readable representation of the graph
32
+ print('Export complete. ONNX model saved to %s\nView with https://github.com/lutzroeder/netron' % f)