yan123yan committed
Commit 3ce0474 • Parent(s): f827af4
adding yolov5
This view is limited to 50 files because it contains too many changes. See the raw diff for the full changeset.
- app.py +23 -5
- infer/yolov5/__init__.py +4 -0
- infer/yolov5/benchmarks.py +187 -0
- infer/yolov5/classify/__init__.py +0 -0
- infer/yolov5/classify/predict.py +235 -0
- infer/yolov5/classify/train.py +360 -0
- infer/yolov5/classify/val.py +182 -0
- infer/yolov5/cli.py +31 -0
- infer/yolov5/detect.py +270 -0
- infer/yolov5/export.py +829 -0
- infer/yolov5/get_results.py +75 -14
- infer/yolov5/helpers.py +463 -0
- infer/yolov5/hubconf.py +169 -0
- infer/yolov5/lung_en.yaml +28 -0
- infer/yolov5/models/__init__.py +0 -0
- infer/yolov5/models/common.py +878 -0
- infer/yolov5/models/experimental.py +111 -0
- infer/yolov5/models/hub/anchors.yaml +59 -0
- infer/yolov5/models/hub/yolov3-spp.yaml +51 -0
- infer/yolov5/models/hub/yolov3-tiny.yaml +41 -0
- infer/yolov5/models/hub/yolov3.yaml +51 -0
- infer/yolov5/models/hub/yolov5-bifpn.yaml +48 -0
- infer/yolov5/models/hub/yolov5-fpn.yaml +42 -0
- infer/yolov5/models/hub/yolov5-p2.yaml +54 -0
- infer/yolov5/models/hub/yolov5-p34.yaml +41 -0
- infer/yolov5/models/hub/yolov5-p6.yaml +56 -0
- infer/yolov5/models/hub/yolov5-p7.yaml +67 -0
- infer/yolov5/models/hub/yolov5-panet.yaml +48 -0
- infer/yolov5/models/hub/yolov5l6.yaml +60 -0
- infer/yolov5/models/hub/yolov5m6.yaml +60 -0
- infer/yolov5/models/hub/yolov5n6.yaml +60 -0
- infer/yolov5/models/hub/yolov5s-LeakyReLU.yaml +49 -0
- infer/yolov5/models/hub/yolov5s-ghost.yaml +48 -0
- infer/yolov5/models/hub/yolov5s-transformer.yaml +48 -0
- infer/yolov5/models/hub/yolov5s6.yaml +60 -0
- infer/yolov5/models/hub/yolov5x6.yaml +60 -0
- infer/yolov5/models/segment/yolov5l-seg.yaml +48 -0
- infer/yolov5/models/segment/yolov5m-seg.yaml +48 -0
- infer/yolov5/models/segment/yolov5n-seg.yaml +48 -0
- infer/yolov5/models/segment/yolov5s-seg.yaml +48 -0
- infer/yolov5/models/segment/yolov5x-seg.yaml +48 -0
- infer/yolov5/models/tf.py +608 -0
- infer/yolov5/models/yolo.py +391 -0
- infer/yolov5/models/yolov5l.yaml +48 -0
- infer/yolov5/models/yolov5m.yaml +48 -0
- infer/yolov5/models/yolov5n.yaml +48 -0
- infer/yolov5/models/yolov5s.yaml +48 -0
- infer/yolov5/models/yolov5x.yaml +48 -0
- infer/yolov5/segment/__init__.py +0 -0
- infer/yolov5/segment/predict.py +293 -0
app.py
CHANGED
@@ -3,6 +3,9 @@ import os
 from PIL import Image
 import random
 import cv2
+import shutil
+import sys
+sys.dont_write_bytecode = True
 
 from infer.yolov7.get_results import get_yolov7_result
 from infer.yolov5.get_results import get_yolov5_result
@@ -30,12 +33,13 @@ model_list = ['YOLO-V8',
 
 st.set_page_config(layout="wide")
 
+
 def inference(image, model_name, conf_threshold, iou_threshold):
     if model_name == "YOLO-V7":
         return get_yolov7_result(image, conf_threshold, iou_threshold, label_names)
     elif model_name == "YOLO-V5":
-
-        return None, None
+        return get_yolov5_result(image, conf_threshold, iou_threshold, label_names)
+        #return None, None
     elif model_name == "YOLO-V8":
         return get_yolov8_result(image, conf_threshold, iou_threshold, label_names)
     else:
@@ -113,9 +117,23 @@ with body1:
         with col:
             st.button('Select', key=i, use_container_width=True, on_click=image_on_click, args=(i,))
 
-
-
-
+    image_cols = st.columns(5)
+    for i, col in enumerate(image_cols, start=5):
+        with col:
+            image = Image.open(os.path.join(image_path, each_image_name[i]))
+            st.image(image, each_image_name[i].replace(".png", ""))
+    button_cols = st.columns(5)
+    for i, col in enumerate(button_cols, start=5):
+        with col:
+            st.button('Select', key=i, use_container_width=True, on_click=image_on_click, args=(i,))
+
+    component_col1, component_col2, component_col3 = st.columns(3)
+    with component_col1:
+        selected_model = st.selectbox('Select the inference model', model_list)
+    with component_col2:
+        conf_threshold = st.slider('Select the confidence threshold', 0.0, 1.0, 0.50)
+    with component_col3:
+        iou_threshold = st.slider('Select the IOU threshold', 0.0, 1.0, 0.01)
 
 body2 = st.container()
 with body2:
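For context, a minimal sketch of how the newly wired YOLO-V5 branch might be exercised outside Streamlit. The call signature comes from the diff above; the image path and label list here are hypothetical and not part of this commit:

    from PIL import Image
    from infer.yolov5.get_results import get_yolov5_result

    label_names = ['lung_opacity']            # hypothetical class list for this app
    image = Image.open('images/sample.png')   # hypothetical test image
    # Same call the Streamlit app now makes for the "YOLO-V5" model choice:
    result = get_yolov5_result(image, 0.5, 0.01, label_names)
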
infer/yolov5/__init__.py
ADDED
@@ -0,0 +1,4 @@
+from infer.yolov5.helpers import YOLOv5
+from infer.yolov5.helpers import load_model as load
+
+__version__ = "7.0.13"
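These re-exports suggest a package-level entry point. A sketch of the likely intended usage, assuming helpers.load_model follows the yolov5-pip convention (helpers.py itself is not shown in this view, so the call on the loaded model is an assumption):

    from infer import yolov5

    print(yolov5.__version__)          # '7.0.13'
    model = yolov5.load('yolov5s.pt')  # helpers.load_model, re-exported as `load`
    results = model('image.jpg')       # hypothetical; depends on the helpers.YOLOv5 API
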
infer/yolov5/benchmarks.py
ADDED
@@ -0,0 +1,187 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Run YOLOv5 benchmarks on all supported export formats
+
+Format | `export.py --include` | Model
+--- | --- | ---
+PyTorch | - | yolov5s.pt
+TorchScript | `torchscript` | yolov5s.torchscript
+ONNX | `onnx` | yolov5s.onnx
+OpenVINO | `openvino` | yolov5s_openvino_model/
+TensorRT | `engine` | yolov5s.engine
+CoreML | `coreml` | yolov5s.mlmodel
+TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/
+TensorFlow GraphDef | `pb` | yolov5s.pb
+TensorFlow Lite | `tflite` | yolov5s.tflite
+TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite
+TensorFlow.js | `tfjs` | yolov5s_web_model/
+
+Requirements:
+    $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu  # CPU
+    $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow  # GPU
+    $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com  # TensorRT
+
+Usage:
+    $ yolov5 benchmarks --weights yolov5s.pt --img 640
+"""
+
+import argparse
+import platform
+import sys
+import time
+from pathlib import Path
+
+import pandas as pd
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[0]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+# ROOT = ROOT.relative_to(Path.cwd())  # relative
+
+import infer.yolov5.export
+from infer.yolov5.models.experimental import attempt_load
+from infer.yolov5.models.yolo import SegmentationModel
+from infer.yolov5.segment.val import run as val_seg
+from infer.yolov5.utils import notebook_init
+from infer.yolov5.utils.general import LOGGER, check_yaml, file_size, print_args
+from infer.yolov5.utils.torch_utils import select_device
+from infer.yolov5.val import run as val_det
+
+
+def run(
+        weights='yolov5s.pt',  # weights path
+        imgsz=None,  # inference size (pixels)
+        img=None,  # inference size (pixels)
+        batch_size=None,  # batch size
+        batch=None,  # batch size
+        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
+        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
+        half=False,  # use FP16 half-precision inference
+        test=False,  # test exports only
+        pt_only=False,  # test PyTorch only
+        hard_fail=False,  # throw error on benchmark failure
+):
+    if imgsz is None and img is None:
+        imgsz = 640
+    elif img is not None:
+        imgsz = img
+    if batch_size is None and batch is None:
+        batch_size = 1
+    elif batch is not None:
+        batch_size = batch
+
+    y, t = [], time.time()
+    device = select_device(device)
+    model_type = type(attempt_load(weights, fuse=False))  # DetectionModel, SegmentationModel, etc.
+    for i, (name, f, suffix, cpu, gpu) in infer.yolov5.export.export_formats().iterrows():  # index, (name, file, suffix, CPU, GPU)
+        try:
+            assert i not in (9, 10), 'inference not supported'  # Edge TPU and TF.js are unsupported
+            assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13'  # CoreML
+            if 'cpu' in device.type:
+                assert cpu, 'inference not supported on CPU'
+            if 'cuda' in device.type:
+                assert gpu, 'inference not supported on GPU'
+
+            # Export
+            if f == '-':
+                w = weights  # PyTorch format
+            else:
+                w = infer.yolov5.export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1]  # all others
+            assert suffix in str(w), 'export failed'
+
+            # Validate
+            if model_type == SegmentationModel:
+                result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half)
+                metric = result[0][7]  # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls))
+            else:  # DetectionModel:
+                result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half)
+                metric = result[0][3]  # (p, r, map50, map, *loss(box, obj, cls))
+            speed = result[2][1]  # times (preprocess, inference, postprocess)
+            y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)])  # MB, mAP, t_inference
+        except Exception as e:
+            if hard_fail:
+                assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}'
+            LOGGER.warning(f'WARNING ⚠️ Benchmark failure for {name}: {e}')
+            y.append([name, None, None, None])  # mAP, t_inference
+        if pt_only and i == 0:
+            break  # break after PyTorch
+
+    # Print results
+    LOGGER.info('\n')
+    #parse_opt()
+    notebook_init()  # print system info
+    c = ['Format', 'Size (MB)', 'mAP50-95', 'Inference time (ms)'] if map else ['Format', 'Export', '', '']
+    py = pd.DataFrame(y, columns=c)
+    LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)')
+    LOGGER.info(str(py if map else py.iloc[:, :2]))
+    if hard_fail and isinstance(hard_fail, str):
+        metrics = py['mAP50-95'].array  # values to compare to floor
+        floor = eval(hard_fail)  # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
+        assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: mAP50-95 < floor {floor}'
+    return py
+
+
+def test(
+        weights=ROOT / 'yolov5s.pt',  # weights path
+        imgsz=640,  # inference size (pixels)
+        batch_size=1,  # batch size
+        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
+        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
+        half=False,  # use FP16 half-precision inference
+        test=False,  # test exports only
+        pt_only=False,  # test PyTorch only
+        hard_fail=False,  # throw error on benchmark failure
+):
+    y, t = [], time.time()
+    device = select_device(device)
+    for i, (name, f, suffix, gpu) in infer.yolov5.export.export_formats().iterrows():  # index, (name, file, suffix, gpu-capable)
+        try:
+            w = weights if f == '-' else \
+                infer.yolov5.export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1]  # weights
+            assert suffix in str(w), 'export failed'
+            y.append([name, True])
+        except Exception:
+            y.append([name, False])  # mAP, t_inference
+
+    # Print results
+    LOGGER.info('\n')
+    parse_opt()
+    notebook_init()  # print system info
+    py = pd.DataFrame(y, columns=['Format', 'Export'])
+    LOGGER.info(f'\nExports complete ({time.time() - t:.2f}s)')
+    LOGGER.info(str(py))
+    return py
+
+
+def run_cli(**kwargs):
+    '''
+    To be called from yolov5.cli
+    '''
+    _ = run(**kwargs)
+
+
+def parse_opt():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--weights', type=str, default='yolov5s.pt', help='weights path')
+    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
+    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
+    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
+    parser.add_argument('--test', action='store_true', help='test exports only')
+    parser.add_argument('--pt-only', action='store_true', help='test PyTorch only')
+    parser.add_argument('--hard-fail', nargs='?', const=True, default=False, help='Exception on error or < min metric')
+    opt = parser.parse_args()
+    opt.data = check_yaml(opt.data)  # check YAML
+    print_args(vars(opt))
+    return opt
+
+
+def main():
+    opt = parse_opt()
+    test(**vars(opt)) if opt.test else run(**vars(opt))
+
+
+if __name__ == "__main__":
+    main()
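A sketch of invoking the benchmark programmatically rather than through the CLI, using the run() signature shown above (assumes yolov5s.pt and the coco128 dataset YAML are available locally):

    from infer.yolov5.benchmarks import run

    # Benchmark only the native PyTorch checkpoint on CPU; returns a pandas DataFrame
    # with one row per export format (Format, Size (MB), mAP50-95, Inference time (ms)).
    df = run(weights='yolov5s.pt', imgsz=640, device='cpu', pt_only=True)
    print(df)
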
infer/yolov5/classify/__init__.py
ADDED
File without changes
infer/yolov5/classify/predict.py
ADDED
@@ -0,0 +1,235 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Run YOLOv5 classification inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
+
+Usage - sources:
+    $ python classify/predict.py --weights yolov5s-cls.pt --source 0                               # webcam
+                                                                   img.jpg                         # image
+                                                                   vid.mp4                         # video
+                                                                   screen                          # screenshot
+                                                                   path/                           # directory
+                                                                   list.txt                        # list of images
+                                                                   list.streams                    # list of streams
+                                                                   'path/*.jpg'                    # glob
+                                                                   'https://youtu.be/Zgi9g1ksQHc'  # YouTube
+                                                                   'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
+
+Usage - formats:
+    $ python classify/predict.py --weights yolov5s-cls.pt                 # PyTorch
+                                           yolov5s-cls.torchscript        # TorchScript
+                                           yolov5s-cls.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+                                           yolov5s-cls_openvino_model     # OpenVINO
+                                           yolov5s-cls.engine             # TensorRT
+                                           yolov5s-cls.mlmodel            # CoreML (macOS-only)
+                                           yolov5s-cls_saved_model        # TensorFlow SavedModel
+                                           yolov5s-cls.pb                 # TensorFlow GraphDef
+                                           yolov5s-cls.tflite             # TensorFlow Lite
+                                           yolov5s-cls_edgetpu.tflite     # TensorFlow Edge TPU
+                                           yolov5s-cls_paddle_model       # PaddlePaddle
+"""
+
+import argparse
+import os
+import platform
+import sys
+from pathlib import Path
+
+import torch
+import torch.nn.functional as F
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+from infer.yolov5.models.common import DetectMultiBackend
+from infer.yolov5.utils.augmentations import classify_transforms
+from infer.yolov5.utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
+from infer.yolov5.utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
+                                        increment_path, print_args, strip_optimizer)
+from infer.yolov5.utils.plots import Annotator
+from infer.yolov5.utils.torch_utils import select_device, smart_inference_mode
+
+
+@smart_inference_mode()
+def run(
+        weights='yolov5s-cls.pt',  # model.pt path(s)
+        source=ROOT / 'data/images',  # file/dir/URL/glob/screen/0(webcam)
+        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
+        imgsz=None,  # inference size (height, width)
+        img=None,  # inference size (pixels)
+        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
+        view_img=False,  # show results
+        save_txt=False,  # save results to *.txt
+        nosave=False,  # do not save images/videos
+        augment=False,  # augmented inference
+        visualize=False,  # visualize features
+        update=False,  # update all models
+        project='runs/predict-cls',  # save results to project/name
+        name='exp',  # save results to project/name
+        exist_ok=False,  # existing project/name ok, do not increment
+        half=False,  # use FP16 half-precision inference
+        dnn=False,  # use OpenCV DNN for ONNX inference
+        vid_stride=1,  # video frame-rate stride
+):
+    source = str(source)
+    save_img = not nosave and not source.endswith('.txt')  # save inference images
+    is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
+    is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
+    webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file)
+    screenshot = source.lower().startswith('screen')
+    if is_url and is_file:
+        source = check_file(source)  # download
+
+    if imgsz is None and img is None:
+        imgsz = 224
+    elif img is not None:
+        imgsz = img
+
+    if isinstance(imgsz, int):
+        imgsz = [imgsz, imgsz]
+
+    # Directories
+    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
+    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir
+
+    # Load model
+    device = select_device(device)
+    model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
+    stride, names, pt = model.stride, model.names, model.pt
+    imgsz = check_img_size(imgsz, s=stride)  # check image size
+
+    # Dataloader
+    bs = 1  # batch_size
+    if webcam:
+        view_img = check_imshow(warn=True)
+        dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride)
+        bs = len(dataset)
+    elif screenshot:
+        dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
+    else:
+        dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride)
+    vid_path, vid_writer = [None] * bs, [None] * bs
+
+    # Run inference
+    model.warmup(imgsz=(1 if pt else bs, 3, *imgsz))  # warmup
+    seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
+    for path, im, im0s, vid_cap, s in dataset:
+        with dt[0]:
+            im = torch.Tensor(im).to(model.device)
+            im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
+            if len(im.shape) == 3:
+                im = im[None]  # expand for batch dim
+
+        # Inference
+        with dt[1]:
+            results = model(im)
+
+        # Post-process
+        with dt[2]:
+            pred = F.softmax(results, dim=1)  # probabilities
+
+        # Process predictions
+        for i, prob in enumerate(pred):  # per image
+            seen += 1
+            if webcam:  # batch_size >= 1
+                p, im0, frame = path[i], im0s[i].copy(), dataset.count
+                s += f'{i}: '
+            else:
+                p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
+
+            p = Path(p)  # to Path
+            save_path = str(save_dir / p.name)  # im.jpg
+            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # im.txt
+
+            s += '%gx%g ' % im.shape[2:]  # print string
+            annotator = Annotator(im0, example=str(names), pil=True)
+
+            # Print results
+            top5i = prob.argsort(0, descending=True)[:5].tolist()  # top 5 indices
+            s += f"{', '.join(f'{names[j]} {prob[j]:.2f}' for j in top5i)}, "
+
+            # Write results
+            text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i)
+            if save_img or view_img:  # Add bbox to image
+                annotator.text((32, 32), text, txt_color=(255, 255, 255))
+            if save_txt:  # Write to file
+                with open(f'{txt_path}.txt', 'a') as f:
+                    f.write(text + '\n')
+
+            # Stream results
+            im0 = annotator.result()
+            if view_img:
+                if platform.system() == 'Linux' and p not in windows:
+                    windows.append(p)
+                    cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
+                    cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
+                cv2.imshow(str(p), im0)
+                cv2.waitKey(1)  # 1 millisecond
+
+            # Save results (image with detections)
+            if save_img:
+                if dataset.mode == 'image':
+                    cv2.imwrite(save_path, im0)
+                else:  # 'video' or 'stream'
+                    if vid_path[i] != save_path:  # new video
+                        vid_path[i] = save_path
+                        if isinstance(vid_writer[i], cv2.VideoWriter):
+                            vid_writer[i].release()  # release previous video writer
+                        if vid_cap:  # video
+                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
+                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+                        else:  # stream
+                            fps, w, h = 30, im0.shape[1], im0.shape[0]
+                        save_path = str(Path(save_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
+                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+                    vid_writer[i].write(im0)
+
+        # Print time (inference-only)
+        LOGGER.info(f'{s}{dt[1].dt * 1E3:.1f}ms')
+
+    # Print results
+    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
+    LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
+    if save_txt or save_img:
+        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
+        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
+    if update:
+        strip_optimizer(weights[0])  # update model (to fix SourceChangeWarning)
+
+
+def parse_opt():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--weights', nargs='+', type=str, default='yolov5s-cls.pt', help='model path(s)')
+    parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)')
+    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
+    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[224], help='inference size h,w')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--view-img', action='store_true', help='show results')
+    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
+    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
+    parser.add_argument('--augment', action='store_true', help='augmented inference')
+    parser.add_argument('--visualize', action='store_true', help='visualize features')
+    parser.add_argument('--update', action='store_true', help='update all models')
+    parser.add_argument('--project', default='runs/predict-cls', help='save results to project/name')
+    parser.add_argument('--name', default='exp', help='save results to project/name')
+    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
+    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
+    parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
+    opt = parser.parse_args()
+    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
+    print_args(vars(opt))
+    return opt
+
+
+def main():
+    opt = parse_opt()
+    check_requirements(exclude=('tensorboard', 'thop'))
+    run(**vars(opt))
+
+
+if __name__ == "__main__":
+    main()
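A sketch of calling the classifier directly from Python, using the run() parameters shown above (the weights and source paths are illustrative):

    from infer.yolov5.classify.predict import run

    # Classify one image at the 224-pixel default; results land in runs/predict-cls/exp*.
    run(weights='yolov5s-cls.pt', source='data/images/sample.jpg', save_txt=True)
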
infer/yolov5/classify/train.py
ADDED
@@ -0,0 +1,360 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Train a YOLOv5 classifier model on a classification dataset
+
+Usage - Single-GPU training:
+    $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224
+
+Usage - Multi-GPU DDP training:
+    $ python -m torch.distributed.run --nproc_per_node 4 --master_port 2022 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3
+
+Datasets:           --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/data'
+YOLOv5-cls models:  --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt
+Torchvision models: --model resnet50, efficientnet_b0, etc. See https://pytorch.org/vision/stable/models.html
+"""
+
+import argparse
+import os
+import subprocess
+import sys
+import time
+from copy import deepcopy
+from datetime import datetime
+from pathlib import Path
+
+import torch
+import torch.distributed as dist
+import torch.hub as hub
+import torch.optim.lr_scheduler as lr_scheduler
+import torchvision
+from torch.cuda import amp
+from tqdm import tqdm
+
+from infer.yolov5.utils.downloads import attempt_download_from_hub
+from infer.yolov5.utils.roboflow import RoboflowConnector
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+from infer.yolov5.classify import val as validate
+from infer.yolov5.models.experimental import attempt_load
+from infer.yolov5.models.yolo import ClassificationModel, DetectionModel
+from infer.yolov5.utils.dataloaders import create_classification_dataloader
+from infer.yolov5.utils.general import (DATASETS_DIR, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_info, check_git_status,
+                                        check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save)
+from infer.yolov5.utils.loggers import GenericLogger
+from infer.yolov5.utils.plots import imshow_cls
+from infer.yolov5.utils.torch_utils import (ModelEMA, de_parallel, model_info, reshape_classifier_output, select_device, smart_DDP,
+                                            smart_optimizer, smartCrossEntropyLoss, torch_distributed_zero_first)
+
+LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
+RANK = int(os.getenv('RANK', -1))
+WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
+#GIT_INFO = check_git_info()
+
+
+def train(opt, device):
+    init_seeds(opt.seed + 1 + RANK, deterministic=True)
+    save_dir, data, bs, epochs, nw, imgsz, pretrained = \
+        opt.save_dir, Path(opt.data), opt.batch_size, opt.epochs, min(os.cpu_count() - 1, opt.workers), \
+        opt.imgsz, str(opt.pretrained).lower() == 'true'
+    cuda = device.type != 'cpu'
+
+    # Directories
+    wdir = save_dir / 'weights'
+    wdir.mkdir(parents=True, exist_ok=True)  # make dir
+    last, best = wdir / 'last.pt', wdir / 'best.pt'
+
+    # Save run settings
+    yaml_save(save_dir / 'opt.yaml', vars(opt))
+
+    # Logger
+    logger = GenericLogger(opt=opt, console_logger=LOGGER) if RANK in {-1, 0} else None
+
+    # Download Dataset
+    with torch_distributed_zero_first(LOCAL_RANK):
+        data_dir = data if data.is_dir() else (DATASETS_DIR / data)
+        if not data_dir.is_dir():
+            LOGGER.info(f'\nDataset not found ⚠️, missing path {data_dir}, attempting download...')
+            t = time.time()
+            if str(data) == 'imagenet':
+                subprocess.run(['bash', str(ROOT / 'data/scripts/get_imagenet.sh')], shell=True, check=True)
+            else:
+                url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip'
+                download(url, dir=data_dir.parent)
+            s = f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n"
+            LOGGER.info(s)
+
+    # Dataloaders
+    nc = len([x for x in (data_dir / 'train').glob('*') if x.is_dir()])  # number of classes
+    trainloader = create_classification_dataloader(path=data_dir / 'train',
+                                                   imgsz=imgsz,
+                                                   batch_size=bs // WORLD_SIZE,
+                                                   augment=True,
+                                                   cache=opt.cache,
+                                                   rank=LOCAL_RANK,
+                                                   workers=nw)
+
+    test_dir = data_dir / 'test' if (data_dir / 'test').exists() else data_dir / 'val'  # data/test or data/val
+    if RANK in {-1, 0}:
+        testloader = create_classification_dataloader(path=test_dir,
+                                                      imgsz=imgsz,
+                                                      batch_size=bs // WORLD_SIZE * 2,
+                                                      augment=False,
+                                                      cache=opt.cache,
+                                                      rank=-1,
+                                                      workers=nw)
+
+    # Model
+    # try to download from hf hub
+    result = attempt_download_from_hub(opt.model, hf_token=None)
+    if result is not None:
+        opt.model = result
+    with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT):
+        if Path(opt.model).is_file() or opt.model.endswith('.pt'):
+            model = attempt_load(opt.model, device='cpu', fuse=False)
+        elif opt.model in torchvision.models.__dict__:  # TorchVision models i.e. resnet50, efficientnet_b0
+            model = torchvision.models.__dict__[opt.model](weights='IMAGENET1K_V1' if pretrained else None)
+        else:
+            m = hub.list('ultralytics/yolov5')  # + hub.list('pytorch/vision')  # models
+            raise ModuleNotFoundError(f'--model {opt.model} not found. Available models are: \n' + '\n'.join(m))
+        if isinstance(model, DetectionModel):
+            LOGGER.warning("WARNING ⚠️ pass YOLOv5 classifier model with '-cls' suffix, i.e. '--model yolov5s-cls.pt'")
+            model = ClassificationModel(model=model, nc=nc, cutoff=opt.cutoff or 10)  # convert to classification model
+        reshape_classifier_output(model, nc)  # update class count
+    for m in model.modules():
+        if not pretrained and hasattr(m, 'reset_parameters'):
+            m.reset_parameters()
+        if isinstance(m, torch.nn.Dropout) and opt.dropout is not None:
+            m.p = opt.dropout  # set dropout
+    for p in model.parameters():
+        p.requires_grad = True  # for training
+    model = model.to(device)
+
+    # Info
+    if RANK in {-1, 0}:
+        model.names = trainloader.dataset.classes  # attach class names
+        model.transforms = testloader.dataset.torch_transforms  # attach inference transforms
+        model_info(model)
+        if opt.verbose:
+            LOGGER.info(model)
+        images, labels = next(iter(trainloader))
+        file = imshow_cls(images[:25], labels[:25], names=model.names, f=save_dir / 'train_images.jpg')
+        logger.log_images(file, name='Train Examples')
+        logger.log_graph(model, imgsz)  # log model
+
+    # Optimizer
+    optimizer = smart_optimizer(model, opt.optimizer, opt.lr0, momentum=0.9, decay=opt.decay)
+
+    # Scheduler
+    lrf = 0.01  # final lr (fraction of lr0)
+    # lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf  # cosine
+    lf = lambda x: (1 - x / epochs) * (1 - lrf) + lrf  # linear
+    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
+    # scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=lr0, total_steps=epochs, pct_start=0.1,
+    #                                    final_div_factor=1 / 25 / lrf)
+
+    # EMA
+    ema = ModelEMA(model) if RANK in {-1, 0} else None
+
+    # DDP mode
+    if cuda and RANK != -1:
+        model = smart_DDP(model)
+
+    # Train
+    t0 = time.time()
+    criterion = smartCrossEntropyLoss(label_smoothing=opt.label_smoothing)  # loss function
+    best_fitness = 0.0
+    scaler = amp.GradScaler(enabled=cuda)
+    val = test_dir.stem  # 'val' or 'test'
+    LOGGER.info(f'Image sizes {imgsz} train, {imgsz} test\n'
+                f'Using {nw * WORLD_SIZE} dataloader workers\n'
+                f"Logging results to {colorstr('bold', save_dir)}\n"
+                f'Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n'
+                f"{'Epoch':>10}{'GPU_mem':>10}{'train_loss':>12}{f'{val}_loss':>12}{'top1_acc':>12}{'top5_acc':>12}")
+    for epoch in range(epochs):  # loop over the dataset multiple times
+        tloss, vloss, fitness = 0.0, 0.0, 0.0  # train loss, val loss, fitness
+        model.train()
+        if RANK != -1:
+            trainloader.sampler.set_epoch(epoch)
+        pbar = enumerate(trainloader)
+        if RANK in {-1, 0}:
+            pbar = tqdm(enumerate(trainloader), total=len(trainloader), bar_format=TQDM_BAR_FORMAT)
+        for i, (images, labels) in pbar:  # progress bar
+            images, labels = images.to(device, non_blocking=True), labels.to(device)
+
+            # Forward
+            with amp.autocast(enabled=cuda):  # stability issues when enabled
+                loss = criterion(model(images), labels)
+
+            # Backward
+            scaler.scale(loss).backward()
+
+            # Optimize
+            scaler.unscale_(optimizer)  # unscale gradients
+            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0)  # clip gradients
+            scaler.step(optimizer)
+            scaler.update()
+            optimizer.zero_grad()
+            if ema:
+                ema.update(model)
+
+            if RANK in {-1, 0}:
+                # Print
+                tloss = (tloss * i + loss.item()) / (i + 1)  # update mean losses
+                mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
+                pbar.desc = f"{f'{epoch + 1}/{epochs}':>10}{mem:>10}{tloss:>12.3g}" + ' ' * 36
+
+                # Test
+                if i == len(pbar) - 1:  # last batch
+                    top1, top5, vloss = validate.run(model=ema.ema,
+                                                     dataloader=testloader,
+                                                     criterion=criterion,
+                                                     pbar=pbar)  # test accuracy, loss
+                    fitness = top1  # define fitness as top1 accuracy
+
+        # Scheduler
+        scheduler.step()
+
+        # Log metrics
+        if RANK in {-1, 0}:
+            # Best fitness
+            if fitness > best_fitness:
+                best_fitness = fitness
+
+            # Log
+            metrics = {
+                'train/loss': tloss,
+                f'{val}/loss': vloss,
+                'metrics/accuracy_top1': top1,
+                'metrics/accuracy_top5': top5,
+                'lr/0': optimizer.param_groups[0]['lr']}  # learning rate
+            logger.log_metrics(metrics, epoch)
+
+            # Save model
+            final_epoch = epoch + 1 == epochs
+            if (not opt.nosave) or final_epoch:
+                ckpt = {
+                    'epoch': epoch,
+                    'best_fitness': best_fitness,
+                    'model': deepcopy(ema.ema).half(),  # deepcopy(de_parallel(model)).half(),
+                    'ema': None,  # deepcopy(ema.ema).half(),
+                    'updates': ema.updates,
+                    'optimizer': None,  # optimizer.state_dict(),
+                    'opt': vars(opt),
+                    'date': datetime.now().isoformat()}
+
+                # Save last, best and delete
+                torch.save(ckpt, last)
+                if best_fitness == fitness:
+                    torch.save(ckpt, best)
+                del ckpt
+
+    # Train complete
+    if RANK in {-1, 0} and final_epoch:
+        LOGGER.info(f'\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)'
+                    f"\nResults saved to {colorstr('bold', save_dir)}"
+                    f"\nPredict:         yolov5 classify predict --weights {best} --source im.jpg"
+                    f"\nValidate:        yolov5 classify val --weights {best} --data {data_dir}"
+                    f"\nExport:          yolov5 export --weights {best} --include onnx"
+                    f"\nPython:          model = yolov5.load('{best}')"
+                    f"\nVisualize:       https://netron.app\n")
+
+        # Plot examples
+        images, labels = (x[:25] for x in next(iter(testloader)))  # first 25 images and labels
+        pred = torch.max(ema.ema(images.to(device)), 1)[1]
+        file = imshow_cls(images, labels, pred, de_parallel(model).names, verbose=False, f=save_dir / 'test_images.jpg')
+
+        # Log results
+        meta = {'epochs': epochs, 'top1_acc': best_fitness, 'date': datetime.now().isoformat()}
+        logger.log_images(file, name='Test Examples (true-predicted)', epoch=epoch)
+        logger.log_model(best, epochs, metadata=meta)
+
+
+def parse_opt(known=False):
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--model', type=str, default='yolov5s-cls.pt', help='initial weights path')
+    parser.add_argument('--data', type=str, default='imagenette160', help='cifar10, cifar100, mnist, imagenet, ...')
+    parser.add_argument('--epochs', type=int, default=10, help='total training epochs')
+    parser.add_argument('--batch-size', type=int, default=64, help='total batch size for all GPUs')
+    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='train, val image size (pixels)')
+    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
+    parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
+    parser.add_argument('--project', default=ROOT / 'runs/train-cls', help='save to project/name')
+    parser.add_argument('--name', default='exp', help='save to project/name')
+    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+    parser.add_argument('--pretrained', nargs='?', const=True, default=True, help='start from i.e. --pretrained False')
+    parser.add_argument('--optimizer', choices=['SGD', 'Adam', 'AdamW', 'RMSProp'], default='Adam', help='optimizer')
+    parser.add_argument('--lr0', type=float, default=0.001, help='initial learning rate')
+    parser.add_argument('--decay', type=float, default=5e-5, help='weight decay')
+    parser.add_argument('--label-smoothing', type=float, default=0.1, help='Label smoothing epsilon')
+    parser.add_argument('--cutoff', type=int, default=None, help='Model layer cutoff index for Classify() head')
+    parser.add_argument('--dropout', type=float, default=None, help='Dropout (fraction)')
+    parser.add_argument('--verbose', action='store_true', help='Verbose mode')
+    parser.add_argument('--seed', type=int, default=0, help='Global training seed')
+    parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')
+
+    # Roboflow arguments
+    parser.add_argument('--roboflow_token', type=str, default=None, help='roboflow api token')
+
+    return parser.parse_known_args()[0] if known else parser.parse_args()
+
+
+def main(opt):
+    # Checks
+    if RANK in {-1, 0}:
+        print_args(vars(opt))
+        check_git_status()
+        check_requirements()
+
+    if "roboflow.com" in str(opt.data):
+        opt.data = RoboflowConnector.download_dataset(
+            url=opt.data,
+            roboflow_token=opt.roboflow_token,
+            task="classify",
+            location=ROOT.absolute().as_posix()
+        )
+
+    # DDP mode
+    device = select_device(opt.device, batch_size=opt.batch_size)
+    if LOCAL_RANK != -1:
+        assert opt.batch_size != -1, 'AutoBatch is coming soon for classification, please pass a valid --batch-size'
+        assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
+        assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
+        torch.cuda.set_device(LOCAL_RANK)
+        device = torch.device('cuda', LOCAL_RANK)
+        dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo')
+
+    # Parameters
+    opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)  # increment run
+
+    # Train
+    train(opt, device)
+
+
+def run(**kwargs):
+    # Usage: from yolov5 import classify; classify.train.run(data=mnist, imgsz=320, model='yolov5m')
+    opt = parse_opt(True)
+    for k, v in kwargs.items():
+        setattr(opt, k, v)
+    main(opt)
+    return opt
+
+
+def run_cli(**kwargs):
+    '''
+    To be called from yolov5.cli
+    '''
+    opt = parse_opt(True)
+    for k, v in kwargs.items():
+        setattr(opt, k, v)
+    main(opt)
+
+
+if __name__ == '__main__':
+    opt = parse_opt()
+    main(opt)
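The module exposes run(**kwargs) for programmatic training, which overrides the argparse defaults shown above. A short sketch; the dataset name and sizes follow the docstring's own examples:

    from infer.yolov5.classify.train import run

    # Fine-tune the small classification checkpoint on imagenette160 for a few epochs.
    opt = run(model='yolov5s-cls.pt', data='imagenette160', epochs=5, imgsz=224, batch_size=32)
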
infer/yolov5/classify/val.py
ADDED
@@ -0,0 +1,182 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Validate a trained YOLOv5 classification model on a classification dataset
+
+Usage:
+    $ bash data/scripts/get_imagenet.sh --val  # download ImageNet val split (6.3G, 50000 images)
+    $ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224  # validate ImageNet
+
+Usage - formats:
+    $ python classify/val.py --weights yolov5s-cls.pt                 # PyTorch
+                                       yolov5s-cls.torchscript        # TorchScript
+                                       yolov5s-cls.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+                                       yolov5s-cls_openvino_model     # OpenVINO
+                                       yolov5s-cls.engine             # TensorRT
+                                       yolov5s-cls.mlmodel            # CoreML (macOS-only)
+                                       yolov5s-cls_saved_model        # TensorFlow SavedModel
+                                       yolov5s-cls.pb                 # TensorFlow GraphDef
+                                       yolov5s-cls.tflite             # TensorFlow Lite
+                                       yolov5s-cls_edgetpu.tflite     # TensorFlow Edge TPU
+                                       yolov5s-cls_paddle_model       # PaddlePaddle
+"""
+
+import argparse
+import os
+import sys
+from pathlib import Path
+
+import torch
+from tqdm import tqdm
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+from infer.yolov5.models.common import DetectMultiBackend
+from infer.yolov5.utils.dataloaders import create_classification_dataloader
+from infer.yolov5.utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_img_size, check_requirements, colorstr,
+                                        increment_path, print_args)
+from infer.yolov5.utils.torch_utils import select_device, smart_inference_mode
+
+
+@smart_inference_mode()
+def run(
+        data='../datasets/mnist',  # dataset dir
+        weights='yolov5s-cls.pt',  # model.pt path(s)
+        batch_size=None,  # batch size
+        batch=None,  # batch size
+        imgsz=None,  # inference size (pixels)
+        img=None,  # inference size (pixels)
+        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
+        workers=8,  # max dataloader workers (per RANK in DDP mode)
+        verbose=False,  # verbose output
+        project='runs/val-cls',  # save to project/name
+        name='exp',  # save to project/name
+        exist_ok=False,  # existing project/name ok, do not increment
+        half=False,  # use FP16 half-precision inference
+        dnn=False,  # use OpenCV DNN for ONNX inference
+        model=None,
+        dataloader=None,
+        criterion=None,
+        pbar=None,
+):
+
+    if imgsz is None and img is None:
+        imgsz = 224
+    elif img is not None:
+        imgsz = img
+    if batch_size is None and batch is None:
+        batch_size = 128
+    elif batch is not None:
+        batch_size = batch
+
+    # Initialize/load model and set device
+    training = model is not None
+    if training:  # called by train.py
+        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
+        half &= device.type != 'cpu'  # half precision only supported on CUDA
+        model.half() if half else model.float()
+    else:  # called directly
+        device = select_device(device, batch_size=batch_size)
+
+        # Directories
+        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
+        save_dir.mkdir(parents=True, exist_ok=True)  # make dir
+
+        # Load model
+        model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half)
+        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
+        imgsz = check_img_size(imgsz, s=stride)  # check image size
+        half = model.fp16  # FP16 supported on limited backends with CUDA
+        if engine:
+            batch_size = model.batch_size
+        else:
+            device = model.device
+            if not (pt or jit):
+                batch_size = 1  # export.py models default to batch-size 1
+                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')
+
+        # Dataloader
+        data = Path(data)
+        test_dir = data / 'test' if (data / 'test').exists() else data / 'val'  # data/test or data/val
+        dataloader = create_classification_dataloader(path=test_dir,
+                                                      imgsz=imgsz,
+                                                      batch_size=batch_size,
+                                                      augment=False,
+                                                      rank=-1,
+                                                      workers=workers)
+
+    model.eval()
+    pred, targets, loss, dt = [], [], 0, (Profile(), Profile(), Profile())
+    n = len(dataloader)  # number of batches
+    action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing'
+    desc = f'{pbar.desc[:-36]}{action:>36}' if pbar else f'{action}'
+    bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0)
+    with torch.cuda.amp.autocast(enabled=device.type != 'cpu'):
+        for images, labels in bar:
+            with dt[0]:
+                images, labels = images.to(device, non_blocking=True), labels.to(device)
+
+            with dt[1]:
+                y = model(images)
+
+            with dt[2]:
+                pred.append(y.argsort(1, descending=True)[:, :5])
+                targets.append(labels)
+                if criterion:
+                    loss += criterion(y, labels)
+
+    loss /= n
+    pred, targets = torch.cat(pred), torch.cat(targets)
+    correct = (targets[:, None] == pred).float()
+    acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1)  # (top1, top5) accuracy
+    top1, top5 = acc.mean(0).tolist()
+
+    if pbar:
+        pbar.desc = f'{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}'
+    if verbose:  # all classes
+        LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}")
+        LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}")
+        for i, c in model.names.items():
+            acc_i = acc[targets == i]
+            top1i, top5i = acc_i.mean(0).tolist()
+            LOGGER.info(f'{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}')
+
+        # Print results
+        t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt)  # speeds per image
+        shape = (1, 3, imgsz, imgsz)
+        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t)
+        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")
+
+    return top1, top5, loss
+
+
+def parse_opt():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--data', type=str, default='../datasets/mnist', help='dataset path')
+    parser.add_argument('--weights', nargs='+', type=str, default='yolov5s-cls.pt', help='model.pt path(s)')
+    parser.add_argument('--batch-size', type=int, default=128, help='batch size')
+    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='inference size (pixels)')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
+    parser.add_argument('--verbose', nargs='?', const=True, default=True, help='verbose output')
+    parser.add_argument('--project', default='runs/val-cls', help='save to project/name')
+    parser.add_argument('--name', default='exp', help='save to project/name')
+    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
+    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
+    opt = parser.parse_args()
+    print_args(vars(opt))
+    return opt
+
+
+def main():
+    opt = parse_opt()
+    check_requirements(exclude=('tensorboard', 'thop'))
+    run(**vars(opt))
+
+
+if __name__ == "__main__":
+    main()
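A sketch of standalone validation using the run() signature above (assumes an ImageFolder-style dataset with a val/ or test/ split at the given path, which here is hypothetical):

    from infer.yolov5.classify.val import run

    # Returns (top1, top5, loss) computed over the dataset's test/ or val/ split.
    top1, top5, loss = run(data='../datasets/imagenette160', weights='yolov5s-cls.pt', imgsz=224)
    print(f'top-1 {top1:.3f}, top-5 {top5:.3f}')
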
infer/yolov5/cli.py
ADDED
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
+import fire
+
+from infer.yolov5.benchmarks import run_cli as benchmarks
+from infer.yolov5.classify.predict import run as classify_predict
+from infer.yolov5.classify.train import run_cli as classify_train
+from infer.yolov5.classify.val import run as classify_val
+from infer.yolov5.detect import run as detect
+from infer.yolov5.export import run as export
+from infer.yolov5.segment.predict import run as segment_predict
+from infer.yolov5.segment.train import run_cli as segment_train
+from infer.yolov5.segment.val import run as segment_val
+from infer.yolov5.train import run_cli as train
+from infer.yolov5.val import run as val
+
+
+def app() -> None:
+    """Cli app."""
+    fire.Fire(
+        {
+            "train": train,
+            "val": val,
+            "detect": detect,
+            "export": export,
+            "benchmarks": benchmarks,
+            'classify': {'train': classify_train, 'val': classify_val, 'predict': classify_predict},
+            'segment': {'train': segment_train, 'val': segment_val, 'predict': segment_predict},
+        }
+    )
+
+if __name__ == "__main__":
+    app()
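fire.Fire maps each top-level key of this dict to a subcommand and each nested dict to a command group, so `classify val` resolves to classify_val and CLI flags are forwarded as keyword arguments. A minimal sketch of the dispatch, assuming the package is importable (the console-script wiring for a `yolov5` executable is not part of this file):

# Hypothetical in-process invocation; fire parses sys.argv[1:] by default.
import sys

from infer.yolov5.cli import app

sys.argv = ['yolov5', 'detect', '--weights', 'yolov5s.pt', '--source', 'img.jpg']
app()  # routes 'detect' to infer.yolov5.detect.run with the given flags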
infer/yolov5/detect.py
ADDED
@@ -0,0 +1,270 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
+
+Usage - sources:
+    $ yolov5 detect --weights yolov5s.pt --source 0                               # webcam
+                                                  img.jpg                         # image
+                                                  vid.mp4                         # video
+                                                  screen                          # screenshot
+                                                  path/                           # directory
+                                                  list.txt                        # list of images
+                                                  list.streams                    # list of streams
+                                                  'path/*.jpg'                    # glob
+                                                  'https://youtu.be/Zgi9g1ksQHc'  # YouTube
+                                                  'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
+
+Usage - formats:
+    $ yolov5 detect --weights yolov5s.pt                 # PyTorch
+                              yolov5s.torchscript        # TorchScript
+                              yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+                              yolov5s_openvino_model     # OpenVINO
+                              yolov5s.engine             # TensorRT
+                              yolov5s.mlmodel            # CoreML (macOS-only)
+                              yolov5s_saved_model        # TensorFlow SavedModel
+                              yolov5s.pb                 # TensorFlow GraphDef
+                              yolov5s.tflite             # TensorFlow Lite
+                              yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
+                              yolov5s_paddle_model       # PaddlePaddle
+"""
+
+import argparse
+import os
+import platform
+import sys
+from pathlib import Path
+
+import torch
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[0]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+from infer.yolov5.models.common import DetectMultiBackend
+from infer.yolov5.utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
+from infer.yolov5.utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
+                                        increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
+from infer.yolov5.utils.plots import Annotator, colors, save_one_box
+from infer.yolov5.utils.torch_utils import select_device, smart_inference_mode
+
+
+@smart_inference_mode()
+def run(
+        weights='yolov5s.pt',  # model path or triton URL
+        source=ROOT / 'data/images',  # file/dir/URL/glob/screen/0(webcam)
+        data=ROOT / 'lung-en.yaml',  # dataset.yaml path
+        imgsz=None,  # inference size (height, width)
+        img=None,  # inference size (pixels)
+        conf_thres=0.25,  # confidence threshold
+        iou_thres=0.45,  # NMS IOU threshold
+        max_det=1000,  # maximum detections per image
+        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
+        view_img=False,  # show results
+        save_txt=False,  # save results to *.txt
+        save_conf=False,  # save confidences in --save-txt labels
+        save_crop=False,  # save cropped prediction boxes
+        nosave=False,  # do not save images/videos
+        classes=None,  # filter by class: --class 0, or --class 0 2 3
+        agnostic_nms=False,  # class-agnostic NMS
+        augment=False,  # augmented inference
+        visualize=False,  # visualize features
+        update=False,  # update all models
+        project='runs/detect',  # save results to project/name
+        name='exp',  # save results to project/name
+        exist_ok=False,  # existing project/name ok, do not increment
+        line_thickness=3,  # bounding box thickness (pixels)
+        hide_labels=False,  # hide labels
+        hide_conf=False,  # hide confidences
+        half=False,  # use FP16 half-precision inference
+        dnn=False,  # use OpenCV DNN for ONNX inference
+        vid_stride=1,  # video frame-rate stride
+):
+    source = str(source)
+    save_img = not nosave and not source.endswith('.txt')  # save inference images
+    is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
+    is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
+    webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file)
+    screenshot = source.lower().startswith('screen')
+    if is_url and is_file:
+        source = check_file(source)  # download
+
+    if imgsz is None and img is None:
+        imgsz = 640
+    elif img is not None:
+        imgsz = img
+
+    if isinstance(imgsz, int):
+        imgsz = [imgsz, imgsz]
+
+    # Directories
+    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
+    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir
+
+    # Load model
+    device = select_device(device)
+    model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
+    stride, names, pt = model.stride, model.names, model.pt
+    imgsz = check_img_size(imgsz, s=stride)  # check image size
+
+    # Dataloader
+    bs = 1  # batch_size
+    if webcam:
+        view_img = check_imshow(warn=True)
+        dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
+        bs = len(dataset)
+    elif screenshot:
+        dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
+    else:
+        dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
+    vid_path, vid_writer = [None] * bs, [None] * bs
+
+    # Run inference
+    model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz))  # warmup
+    seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
+    for path, im, im0s, vid_cap, s in dataset:
+        with dt[0]:
+            im = torch.from_numpy(im).to(model.device)
+            im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
+            im /= 255  # 0 - 255 to 0.0 - 1.0
+            if len(im.shape) == 3:
+                im = im[None]  # expand for batch dim
+
+        # Inference
+        with dt[1]:
+            visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
+            pred = model(im, augment=augment, visualize=visualize)
+
+        # NMS
+        with dt[2]:
+            pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
+
+        # Second-stage classifier (optional)
+        # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
+
+        # Process predictions
+        for i, det in enumerate(pred):  # per image
+            seen += 1
+            if webcam:  # batch_size >= 1
+                p, im0, frame = path[i], im0s[i].copy(), dataset.count
+                s += f'{i}: '
+            else:
+                p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
+
+            p = Path(p)  # to Path
+            save_path = str(save_dir / p.name)  # im.jpg
+            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # im.txt
+            s += '%gx%g ' % im.shape[2:]  # print string
+            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
+            imc = im0.copy() if save_crop else im0  # for save_crop
+            annotator = Annotator(im0, line_width=line_thickness, example=str(names))
+            if len(det):
+                # Rescale boxes from img_size to im0 size
+                det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
+
+                # Print results
+                for c in det[:, 5].unique():
+                    n = (det[:, 5] == c).sum()  # detections per class
+                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
+
+                # Write results
+                for *xyxy, conf, cls in reversed(det):
+                    if save_txt:  # Write to file
+                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
+                        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
+                        with open(f'{txt_path}.txt', 'a') as f:
+                            f.write(('%g ' * len(line)).rstrip() % line + '\n')
+
+                    if save_img or save_crop or view_img:  # Add bbox to image
+                        c = int(cls)  # integer class
+                        label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
+                        annotator.box_label(xyxy, label, color=colors(c, True))
+                    if save_crop:
+                        save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
+
+            # Stream results
+            im0 = annotator.result()
+            if view_img:
+                if platform.system() == 'Linux' and p not in windows:
+                    windows.append(p)
+                    cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
+                    cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
+                cv2.imshow(str(p), im0)
+                cv2.waitKey(1)  # 1 millisecond
+
+            # Save results (image with detections)
+            if save_img:
+                if dataset.mode == 'image':
+                    cv2.imwrite(save_path, im0)
+                else:  # 'video' or 'stream'
+                    if vid_path[i] != save_path:  # new video
+                        vid_path[i] = save_path
+                        if isinstance(vid_writer[i], cv2.VideoWriter):
+                            vid_writer[i].release()  # release previous video writer
+                        if vid_cap:  # video
+                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
+                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+                        else:  # stream
+                            fps, w, h = 30, im0.shape[1], im0.shape[0]
+                        save_path = str(Path(save_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
+                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+                    vid_writer[i].write(im0)
+
+        # Print time (inference-only)
+        LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")
+
+    # Print results
+    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
+    LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
+    if save_txt or save_img:
+        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
+        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
+    if update:
+        strip_optimizer(weights[0])  # update model (to fix SourceChangeWarning)
+
+
+def parse_opt():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model path or triton URL')
+    parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)')
+    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
+    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
+    parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
+    parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
+    parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--view-img', action='store_true', help='show results')
+    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
+    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
+    parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
+    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
+    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
+    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
+    parser.add_argument('--augment', action='store_true', help='augmented inference')
+    parser.add_argument('--visualize', action='store_true', help='visualize features')
+    parser.add_argument('--update', action='store_true', help='update all models')
+    parser.add_argument('--project', default='runs/detect', help='save results to project/name')
+    parser.add_argument('--name', default='exp', help='save results to project/name')
+    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+    parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
+    parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
+    parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
+    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
+    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
+    parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
+    opt = parser.parse_args()
+    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
+    print_args(vars(opt))
+    return opt
+
+
+def main():
+    opt = parse_opt()
+    check_requirements(exclude=('tensorboard', 'thop'))
+    run(**vars(opt))
+
+
+if __name__ == "__main__":
+    main()
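Because run() accepts plain keyword arguments (and maps the fire-style img alias onto imgsz), the Streamlit app can call detection directly instead of shelling out; the app-level wrapper is defined in get_results.py, changed further below. A minimal sketch with placeholder paths:

# Hypothetical programmatic detection call; weights/source are placeholders.
from infer.yolov5.detect import run

run(weights='yolov5s.pt',  # any DetectMultiBackend-supported weights
    source='data/images',  # file/dir/URL/glob/screen/0(webcam)
    conf_thres=0.25,       # confidence threshold
    iou_thres=0.45,        # NMS IoU threshold
    save_txt=True)         # also write normalized xywh label files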
infer/yolov5/export.py
ADDED
@@ -0,0 +1,829 @@
1 |
+
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
2 |
+
"""
|
3 |
+
Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit
|
4 |
+
|
5 |
+
Format | `export.py --include` | Model
|
6 |
+
--- | --- | ---
|
7 |
+
PyTorch | - | yolov5s.pt
|
8 |
+
TorchScript | `torchscript` | yolov5s.torchscript
|
9 |
+
ONNX | `onnx` | yolov5s.onnx
|
10 |
+
OpenVINO | `openvino` | yolov5s_openvino_model/
|
11 |
+
TensorRT | `engine` | yolov5s.engine
|
12 |
+
CoreML | `coreml` | yolov5s.mlmodel
|
13 |
+
TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/
|
14 |
+
TensorFlow GraphDef | `pb` | yolov5s.pb
|
15 |
+
TensorFlow Lite | `tflite` | yolov5s.tflite
|
16 |
+
TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite
|
17 |
+
TensorFlow.js | `tfjs` | yolov5s_web_model/
|
18 |
+
PaddlePaddle | `paddle` | yolov5s_paddle_model/
|
19 |
+
|
20 |
+
Requirements:
|
21 |
+
$ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU
|
22 |
+
$ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU
|
23 |
+
|
24 |
+
Usage:
|
25 |
+
$ yolov5 export --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ...
|
26 |
+
|
27 |
+
Inference:
|
28 |
+
$ yolov5 detect --weights yolov5s.pt # PyTorch
|
29 |
+
yolov5s.torchscript # TorchScript
|
30 |
+
yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
|
31 |
+
yolov5s_openvino_model # OpenVINO
|
32 |
+
yolov5s.engine # TensorRT
|
33 |
+
yolov5s.mlmodel # CoreML (macOS-only)
|
34 |
+
yolov5s_saved_model # TensorFlow SavedModel
|
35 |
+
yolov5s.pb # TensorFlow GraphDef
|
36 |
+
yolov5s.tflite # TensorFlow Lite
|
37 |
+
yolov5s_edgetpu.tflite # TensorFlow Edge TPU
|
38 |
+
yolov5s_paddle_model # PaddlePaddle
|
39 |
+
|
40 |
+
TensorFlow.js:
|
41 |
+
$ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
|
42 |
+
$ npm install
|
43 |
+
$ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model
|
44 |
+
$ npm start
|
45 |
+
"""
|
46 |
+
|
47 |
+
import argparse
|
48 |
+
import contextlib
|
49 |
+
import json
|
50 |
+
import os
|
51 |
+
import platform
|
52 |
+
import re
|
53 |
+
import subprocess
|
54 |
+
import sys
|
55 |
+
import time
|
56 |
+
import warnings
|
57 |
+
from pathlib import Path
|
58 |
+
|
59 |
+
import pandas as pd
|
60 |
+
import torch
|
61 |
+
from torch.utils.mobile_optimizer import optimize_for_mobile
|
62 |
+
|
63 |
+
FILE = Path(__file__).resolve()
|
64 |
+
ROOT = FILE.parents[0] # YOLOv5 root directory
|
65 |
+
if str(ROOT) not in sys.path:
|
66 |
+
sys.path.append(str(ROOT)) # add ROOT to PATH
|
67 |
+
if platform.system() != 'Windows':
|
68 |
+
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
|
69 |
+
|
70 |
+
from infer.yolov5.models.experimental import attempt_load
|
71 |
+
from infer.yolov5.models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel
|
72 |
+
from infer.yolov5.utils.dataloaders import LoadImages
|
73 |
+
from infer.yolov5.utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version,
|
74 |
+
check_yaml, colorstr, file_size, get_default_args, print_args, url2file, yaml_save)
|
75 |
+
from infer.yolov5.utils.torch_utils import select_device, smart_inference_mode
|
76 |
+
|
77 |
+
MACOS = platform.system() == 'Darwin' # macOS environment
|
78 |
+
|
79 |
+
|
80 |
+
class iOSModel(torch.nn.Module):
|
81 |
+
|
82 |
+
def __init__(self, model, im):
|
83 |
+
super().__init__()
|
84 |
+
b, c, h, w = im.shape # batch, channel, height, width
|
85 |
+
self.model = model
|
86 |
+
self.nc = model.nc # number of classes
|
87 |
+
if w == h:
|
88 |
+
self.normalize = 1. / w
|
89 |
+
else:
|
90 |
+
self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. / h]) # broadcast (slower, smaller)
|
91 |
+
# np = model(im)[0].shape[1] # number of points
|
92 |
+
# self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. / h]).expand(np, 4) # explicit (faster, larger)
|
93 |
+
|
94 |
+
def forward(self, x):
|
95 |
+
xywh, conf, cls = self.model(x)[0].squeeze().split((4, 1, self.nc), 1)
|
96 |
+
return cls * conf, xywh * self.normalize # confidence (3780, 80), coordinates (3780, 4)
|
97 |
+
|
98 |
+
|
99 |
+
def export_formats():
|
100 |
+
# YOLOv5 export formats
|
101 |
+
x = [
|
102 |
+
['PyTorch', '-', '.pt', True, True],
|
103 |
+
['TorchScript', 'torchscript', '.torchscript', True, True],
|
104 |
+
['ONNX', 'onnx', '.onnx', True, True],
|
105 |
+
['OpenVINO', 'openvino', '_openvino_model', True, False],
|
106 |
+
['TensorRT', 'engine', '.engine', False, True],
|
107 |
+
['CoreML', 'coreml', '.mlmodel', True, False],
|
108 |
+
['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True],
|
109 |
+
['TensorFlow GraphDef', 'pb', '.pb', True, True],
|
110 |
+
['TensorFlow Lite', 'tflite', '.tflite', True, False],
|
111 |
+
['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False],
|
112 |
+
['TensorFlow.js', 'tfjs', '_web_model', False, False],
|
113 |
+
['PaddlePaddle', 'paddle', '_paddle_model', True, True],]
|
114 |
+
return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU'])
|
115 |
+
|
116 |
+
|
117 |
+
def try_export(inner_func):
|
118 |
+
# YOLOv5 export decorator, i..e @try_export
|
119 |
+
inner_args = get_default_args(inner_func)
|
120 |
+
|
121 |
+
def outer_func(*args, **kwargs):
|
122 |
+
prefix = inner_args['prefix']
|
123 |
+
try:
|
124 |
+
with Profile() as dt:
|
125 |
+
f, model = inner_func(*args, **kwargs)
|
126 |
+
LOGGER.info(f'{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)')
|
127 |
+
return f, model
|
128 |
+
except Exception as e:
|
129 |
+
LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}')
|
130 |
+
return None, None
|
131 |
+
|
132 |
+
return outer_func
|
133 |
+
|
134 |
+
|
135 |
+
@try_export
|
136 |
+
def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')):
|
137 |
+
# YOLOv5 TorchScript model export
|
138 |
+
LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
|
139 |
+
f = file.with_suffix('.torchscript')
|
140 |
+
|
141 |
+
ts = torch.jit.trace(model, im, strict=False)
|
142 |
+
d = {'shape': im.shape, 'stride': int(max(model.stride)), 'names': model.names}
|
143 |
+
extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap()
|
144 |
+
if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
|
145 |
+
optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
|
146 |
+
else:
|
147 |
+
ts.save(str(f), _extra_files=extra_files)
|
148 |
+
return f, None
|
149 |
+
|
150 |
+
|
151 |
+
@try_export
|
152 |
+
def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')):
|
153 |
+
# YOLOv5 ONNX export
|
154 |
+
check_requirements('onnx>=1.12.0')
|
155 |
+
import onnx
|
156 |
+
|
157 |
+
LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
|
158 |
+
f = file.with_suffix('.onnx')
|
159 |
+
|
160 |
+
output_names = ['output0', 'output1'] if isinstance(model, SegmentationModel) else ['output0']
|
161 |
+
if dynamic:
|
162 |
+
dynamic = {'images': {0: 'batch', 2: 'height', 3: 'width'}} # shape(1,3,640,640)
|
163 |
+
if isinstance(model, SegmentationModel):
|
164 |
+
dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85)
|
165 |
+
dynamic['output1'] = {0: 'batch', 2: 'mask_height', 3: 'mask_width'} # shape(1,32,160,160)
|
166 |
+
elif isinstance(model, DetectionModel):
|
167 |
+
dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85)
|
168 |
+
|
169 |
+
torch.onnx.export(
|
170 |
+
model.cpu() if dynamic else model, # --dynamic only compatible with cpu
|
171 |
+
im.cpu() if dynamic else im,
|
172 |
+
f,
|
173 |
+
verbose=False,
|
174 |
+
opset_version=opset,
|
175 |
+
do_constant_folding=True, # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False
|
176 |
+
input_names=['images'],
|
177 |
+
output_names=output_names,
|
178 |
+
dynamic_axes=dynamic or None)
|
179 |
+
|
180 |
+
# Checks
|
181 |
+
model_onnx = onnx.load(f) # load onnx model
|
182 |
+
onnx.checker.check_model(model_onnx) # check onnx model
|
183 |
+
|
184 |
+
# Metadata
|
185 |
+
d = {'stride': int(max(model.stride)), 'names': model.names}
|
186 |
+
for k, v in d.items():
|
187 |
+
meta = model_onnx.metadata_props.add()
|
188 |
+
meta.key, meta.value = k, str(v)
|
189 |
+
onnx.save(model_onnx, f)
|
190 |
+
|
191 |
+
# Simplify
|
192 |
+
if simplify:
|
193 |
+
try:
|
194 |
+
cuda = torch.cuda.is_available()
|
195 |
+
check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1'))
|
196 |
+
import onnxsim
|
197 |
+
|
198 |
+
LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
|
199 |
+
model_onnx, check = onnxsim.simplify(model_onnx)
|
200 |
+
assert check, 'assert check failed'
|
201 |
+
onnx.save(model_onnx, f)
|
202 |
+
except Exception as e:
|
203 |
+
LOGGER.info(f'{prefix} simplifier failure: {e}')
|
204 |
+
return f, model_onnx
|
205 |
+
|
206 |
+
|
207 |
+
@try_export
|
208 |
+
def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')):
|
209 |
+
# YOLOv5 OpenVINO export
|
210 |
+
check_requirements('openvino-dev') # requires openvino-dev: https://pypi.org/project/openvino-dev/
|
211 |
+
import openvino.inference_engine as ie
|
212 |
+
|
213 |
+
LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
|
214 |
+
f = str(file).replace('.pt', f'_openvino_model{os.sep}')
|
215 |
+
|
216 |
+
args = [
|
217 |
+
'mo',
|
218 |
+
'--input_model',
|
219 |
+
str(file.with_suffix('.onnx')),
|
220 |
+
'--output_dir',
|
221 |
+
f,
|
222 |
+
'--data_type',
|
223 |
+
('FP16' if half else 'FP32'),]
|
224 |
+
subprocess.run(args, check=True, env=os.environ) # export
|
225 |
+
yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml
|
226 |
+
return f, None
|
227 |
+
|
228 |
+
|
229 |
+
@try_export
|
230 |
+
def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePaddle:')):
|
231 |
+
# YOLOv5 Paddle export
|
232 |
+
check_requirements(('paddlepaddle', 'x2paddle'))
|
233 |
+
import x2paddle
|
234 |
+
from x2paddle.convert import pytorch2paddle
|
235 |
+
|
236 |
+
LOGGER.info(f'\n{prefix} starting export with X2Paddle {x2paddle.__version__}...')
|
237 |
+
f = str(file).replace('.pt', f'_paddle_model{os.sep}')
|
238 |
+
|
239 |
+
pytorch2paddle(module=model, save_dir=f, jit_type='trace', input_examples=[im]) # export
|
240 |
+
yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml
|
241 |
+
return f, None
|
242 |
+
|
243 |
+
|
244 |
+
@try_export
|
245 |
+
def export_coreml(model, im, file, int8, half, nms, prefix=colorstr('CoreML:')):
|
246 |
+
# YOLOv5 CoreML export
|
247 |
+
check_requirements('coremltools')
|
248 |
+
import coremltools as ct
|
249 |
+
|
250 |
+
LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...')
|
251 |
+
f = file.with_suffix('.mlmodel')
|
252 |
+
|
253 |
+
if nms:
|
254 |
+
model = iOSModel(model, im)
|
255 |
+
ts = torch.jit.trace(model, im, strict=False) # TorchScript model
|
256 |
+
ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])])
|
257 |
+
bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None)
|
258 |
+
if bits < 32:
|
259 |
+
if MACOS: # quantization only supported on macOS
|
260 |
+
with warnings.catch_warnings():
|
261 |
+
warnings.filterwarnings('ignore', category=DeprecationWarning) # suppress numpy==1.20 float warning
|
262 |
+
ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
|
263 |
+
else:
|
264 |
+
print(f'{prefix} quantization only supported on macOS, skipping...')
|
265 |
+
ct_model.save(f)
|
266 |
+
return f, ct_model
|
267 |
+
|
268 |
+
|
269 |
+
@try_export
|
270 |
+
def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')):
|
271 |
+
# YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt
|
272 |
+
assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`'
|
273 |
+
try:
|
274 |
+
import tensorrt as trt
|
275 |
+
except Exception:
|
276 |
+
if platform.system() == 'Linux':
|
277 |
+
check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com')
|
278 |
+
import tensorrt as trt
|
279 |
+
|
280 |
+
if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
|
281 |
+
grid = model.model[-1].anchor_grid
|
282 |
+
model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid]
|
283 |
+
export_onnx(model, im, file, 12, dynamic, simplify) # opset 12
|
284 |
+
model.model[-1].anchor_grid = grid
|
285 |
+
else: # TensorRT >= 8
|
286 |
+
check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0
|
287 |
+
export_onnx(model, im, file, 12, dynamic, simplify) # opset 12
|
288 |
+
onnx = file.with_suffix('.onnx')
|
289 |
+
|
290 |
+
LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
|
291 |
+
assert onnx.exists(), f'failed to export ONNX file: {onnx}'
|
292 |
+
f = file.with_suffix('.engine') # TensorRT engine file
|
293 |
+
logger = trt.Logger(trt.Logger.INFO)
|
294 |
+
if verbose:
|
295 |
+
logger.min_severity = trt.Logger.Severity.VERBOSE
|
296 |
+
|
297 |
+
builder = trt.Builder(logger)
|
298 |
+
config = builder.create_builder_config()
|
299 |
+
config.max_workspace_size = workspace * 1 << 30
|
300 |
+
# config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice
|
301 |
+
|
302 |
+
flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
|
303 |
+
network = builder.create_network(flag)
|
304 |
+
parser = trt.OnnxParser(network, logger)
|
305 |
+
if not parser.parse_from_file(str(onnx)):
|
306 |
+
raise RuntimeError(f'failed to load ONNX file: {onnx}')
|
307 |
+
|
308 |
+
inputs = [network.get_input(i) for i in range(network.num_inputs)]
|
309 |
+
outputs = [network.get_output(i) for i in range(network.num_outputs)]
|
310 |
+
for inp in inputs:
|
311 |
+
LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}')
|
312 |
+
for out in outputs:
|
313 |
+
LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}')
|
314 |
+
|
315 |
+
if dynamic:
|
316 |
+
if im.shape[0] <= 1:
|
317 |
+
LOGGER.warning(f'{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument')
|
318 |
+
profile = builder.create_optimization_profile()
|
319 |
+
for inp in inputs:
|
320 |
+
profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape)
|
321 |
+
config.add_optimization_profile(profile)
|
322 |
+
|
323 |
+
LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}')
|
324 |
+
if builder.platform_has_fast_fp16 and half:
|
325 |
+
config.set_flag(trt.BuilderFlag.FP16)
|
326 |
+
with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
|
327 |
+
t.write(engine.serialize())
|
328 |
+
return f, None
|
329 |
+
|
330 |
+
|
331 |
+
@try_export
|
332 |
+
def export_saved_model(model,
|
333 |
+
im,
|
334 |
+
file,
|
335 |
+
dynamic,
|
336 |
+
tf_nms=False,
|
337 |
+
agnostic_nms=False,
|
338 |
+
topk_per_class=100,
|
339 |
+
topk_all=100,
|
340 |
+
iou_thres=0.45,
|
341 |
+
conf_thres=0.25,
|
342 |
+
keras=False,
|
343 |
+
prefix=colorstr('TensorFlow SavedModel:')):
|
344 |
+
# YOLOv5 TensorFlow SavedModel export
|
345 |
+
try:
|
346 |
+
import tensorflow as tf
|
347 |
+
except Exception:
|
348 |
+
check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}")
|
349 |
+
import tensorflow as tf
|
350 |
+
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
|
351 |
+
|
352 |
+
from infer.yolov5.models.tf import TFModel
|
353 |
+
|
354 |
+
LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
|
355 |
+
f = str(file).replace('.pt', '_saved_model')
|
356 |
+
batch_size, ch, *imgsz = list(im.shape) # BCHW
|
357 |
+
|
358 |
+
tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
|
359 |
+
im = tf.zeros((batch_size, *imgsz, ch)) # BHWC order for TensorFlow
|
360 |
+
_ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
|
361 |
+
inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size)
|
362 |
+
outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
|
363 |
+
keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
|
364 |
+
keras_model.trainable = False
|
365 |
+
keras_model.summary()
|
366 |
+
if keras:
|
367 |
+
keras_model.save(f, save_format='tf')
|
368 |
+
else:
|
369 |
+
spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
|
370 |
+
m = tf.function(lambda x: keras_model(x)) # full model
|
371 |
+
m = m.get_concrete_function(spec)
|
372 |
+
frozen_func = convert_variables_to_constants_v2(m)
|
373 |
+
tfm = tf.Module()
|
374 |
+
tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x), [spec])
|
375 |
+
tfm.__call__(im)
|
376 |
+
tf.saved_model.save(tfm,
|
377 |
+
f,
|
378 |
+
options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if check_version(
|
379 |
+
tf.__version__, '2.6') else tf.saved_model.SaveOptions())
|
380 |
+
return f, keras_model
|
381 |
+
|
382 |
+
|
383 |
+
@try_export
|
384 |
+
def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')):
|
385 |
+
# YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow
|
386 |
+
import tensorflow as tf
|
387 |
+
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
|
388 |
+
|
389 |
+
LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
|
390 |
+
f = file.with_suffix('.pb')
|
391 |
+
|
392 |
+
m = tf.function(lambda x: keras_model(x)) # full model
|
393 |
+
m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
|
394 |
+
frozen_func = convert_variables_to_constants_v2(m)
|
395 |
+
frozen_func.graph.as_graph_def()
|
396 |
+
tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)
|
397 |
+
return f, None
|
398 |
+
|
399 |
+
|
400 |
+
@try_export
|
401 |
+
def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')):
|
402 |
+
# YOLOv5 TensorFlow Lite export
|
403 |
+
import tensorflow as tf
|
404 |
+
|
405 |
+
LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
|
406 |
+
batch_size, ch, *imgsz = list(im.shape) # BCHW
|
407 |
+
f = str(file).replace('.pt', '-fp16.tflite')
|
408 |
+
|
409 |
+
converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
|
410 |
+
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
|
411 |
+
converter.target_spec.supported_types = [tf.float16]
|
412 |
+
converter.optimizations = [tf.lite.Optimize.DEFAULT]
|
413 |
+
if int8:
|
414 |
+
from infer.yolov5.models.tf import representative_dataset_gen
|
415 |
+
dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False)
|
416 |
+
converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100)
|
417 |
+
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
|
418 |
+
converter.target_spec.supported_types = []
|
419 |
+
converter.inference_input_type = tf.uint8 # or tf.int8
|
420 |
+
converter.inference_output_type = tf.uint8 # or tf.int8
|
421 |
+
converter.experimental_new_quantizer = True
|
422 |
+
f = str(file).replace('.pt', '-int8.tflite')
|
423 |
+
if nms or agnostic_nms:
|
424 |
+
converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS)
|
425 |
+
|
426 |
+
tflite_model = converter.convert()
|
427 |
+
open(f, 'wb').write(tflite_model)
|
428 |
+
return f, None
|
429 |
+
|
430 |
+
|
431 |
+
@try_export
|
432 |
+
def export_edgetpu(file, prefix=colorstr('Edge TPU:')):
|
433 |
+
# YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/
|
434 |
+
cmd = 'edgetpu_compiler --version'
|
435 |
+
help_url = 'https://coral.ai/docs/edgetpu/compiler/'
|
436 |
+
assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}'
|
437 |
+
if subprocess.run(f'{cmd} > /dev/null 2>&1', shell=True).returncode != 0:
|
438 |
+
LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}')
|
439 |
+
sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system
|
440 |
+
for c in (
|
441 |
+
'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -',
|
442 |
+
'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
|
443 |
+
'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'):
|
444 |
+
subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True)
|
445 |
+
ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]
|
446 |
+
|
447 |
+
LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...')
|
448 |
+
f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model
|
449 |
+
f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model
|
450 |
+
|
451 |
+
subprocess.run([
|
452 |
+
'edgetpu_compiler',
|
453 |
+
'-s',
|
454 |
+
'-d',
|
455 |
+
'-k',
|
456 |
+
'10',
|
457 |
+
'--out_dir',
|
458 |
+
str(file.parent),
|
459 |
+
f_tfl,], check=True)
|
460 |
+
return f, None
|
461 |
+
|
462 |
+
|
463 |
+
@try_export
|
464 |
+
def export_tfjs(file, int8, prefix=colorstr('TensorFlow.js:')):
|
465 |
+
# YOLOv5 TensorFlow.js export
|
466 |
+
check_requirements('tensorflowjs')
|
467 |
+
import tensorflowjs as tfjs
|
468 |
+
|
469 |
+
LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
|
470 |
+
f = str(file).replace('.pt', '_web_model') # js dir
|
471 |
+
f_pb = file.with_suffix('.pb') # *.pb path
|
472 |
+
f_json = f'{f}/model.json' # *.json path
|
473 |
+
|
474 |
+
args = [
|
475 |
+
'tensorflowjs_converter',
|
476 |
+
'--input_format=tf_frozen_model',
|
477 |
+
'--quantize_uint8' if int8 else '',
|
478 |
+
'--output_node_names=Identity,Identity_1,Identity_2,Identity_3',
|
479 |
+
str(f_pb),
|
480 |
+
str(f),]
|
481 |
+
subprocess.run([arg for arg in args if arg], check=True)
|
482 |
+
|
483 |
+
json = Path(f_json).read_text()
|
484 |
+
with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order
|
485 |
+
subst = re.sub(
|
486 |
+
r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
|
487 |
+
r'"Identity.?.?": {"name": "Identity.?.?"}, '
|
488 |
+
r'"Identity.?.?": {"name": "Identity.?.?"}, '
|
489 |
+
r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, '
|
490 |
+
r'"Identity_1": {"name": "Identity_1"}, '
|
491 |
+
r'"Identity_2": {"name": "Identity_2"}, '
|
492 |
+
r'"Identity_3": {"name": "Identity_3"}}}', json)
|
493 |
+
j.write(subst)
|
494 |
+
return f, None
|
495 |
+
|
496 |
+
|
497 |
+
def add_tflite_metadata(file, metadata, num_outputs):
|
498 |
+
# Add metadata to *.tflite models per https://www.tensorflow.org/lite/models/convert/metadata
|
499 |
+
with contextlib.suppress(ImportError):
|
500 |
+
# check_requirements('tflite_support')
|
501 |
+
from tflite_support import flatbuffers
|
502 |
+
from tflite_support import metadata as _metadata
|
503 |
+
from tflite_support import metadata_schema_py_generated as _metadata_fb
|
504 |
+
|
505 |
+
tmp_file = Path('/tmp/meta.txt')
|
506 |
+
with open(tmp_file, 'w') as meta_f:
|
507 |
+
meta_f.write(str(metadata))
|
508 |
+
|
509 |
+
model_meta = _metadata_fb.ModelMetadataT()
|
510 |
+
label_file = _metadata_fb.AssociatedFileT()
|
511 |
+
label_file.name = tmp_file.name
|
512 |
+
model_meta.associatedFiles = [label_file]
|
513 |
+
|
514 |
+
subgraph = _metadata_fb.SubGraphMetadataT()
|
515 |
+
subgraph.inputTensorMetadata = [_metadata_fb.TensorMetadataT()]
|
516 |
+
subgraph.outputTensorMetadata = [_metadata_fb.TensorMetadataT()] * num_outputs
|
517 |
+
model_meta.subgraphMetadata = [subgraph]
|
518 |
+
|
519 |
+
b = flatbuffers.Builder(0)
|
520 |
+
b.Finish(model_meta.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
|
521 |
+
metadata_buf = b.Output()
|
522 |
+
|
523 |
+
populator = _metadata.MetadataPopulator.with_model_file(file)
|
524 |
+
populator.load_metadata_buffer(metadata_buf)
|
525 |
+
populator.load_associated_files([str(tmp_file)])
|
526 |
+
populator.populate()
|
527 |
+
tmp_file.unlink()
|
528 |
+
|
529 |
+
|
530 |
+
def pipeline_coreml(model, im, file, names, y, prefix=colorstr('CoreML Pipeline:')):
|
531 |
+
# YOLOv5 CoreML pipeline
|
532 |
+
import coremltools as ct
|
533 |
+
from PIL import Image
|
534 |
+
|
535 |
+
print(f'{prefix} starting pipeline with coremltools {ct.__version__}...')
|
536 |
+
batch_size, ch, h, w = list(im.shape) # BCHW
|
537 |
+
t = time.time()
|
538 |
+
|
539 |
+
# Output shapes
|
540 |
+
spec = model.get_spec()
|
541 |
+
out0, out1 = iter(spec.description.output)
|
542 |
+
if platform.system() == 'Darwin':
|
543 |
+
img = Image.new('RGB', (w, h)) # img(192 width, 320 height)
|
544 |
+
# img = torch.zeros((*opt.img_size, 3)).numpy() # img size(320,192,3) iDetection
|
545 |
+
out = model.predict({'image': img})
|
546 |
+
out0_shape, out1_shape = out[out0.name].shape, out[out1.name].shape
|
547 |
+
else: # linux and windows can not run model.predict(), get sizes from pytorch output y
|
548 |
+
s = tuple(y[0].shape)
|
549 |
+
out0_shape, out1_shape = (s[1], s[2] - 5), (s[1], 4) # (3780, 80), (3780, 4)
|
550 |
+
|
551 |
+
# Checks
|
552 |
+
nx, ny = spec.description.input[0].type.imageType.width, spec.description.input[0].type.imageType.height
|
553 |
+
na, nc = out0_shape
|
554 |
+
# na, nc = out0.type.multiArrayType.shape # number anchors, classes
|
555 |
+
assert len(names) == nc, f'{len(names)} names found for nc={nc}' # check
|
556 |
+
|
557 |
+
# Define output shapes (missing)
|
558 |
+
out0.type.multiArrayType.shape[:] = out0_shape # (3780, 80)
|
559 |
+
out1.type.multiArrayType.shape[:] = out1_shape # (3780, 4)
|
560 |
+
# spec.neuralNetwork.preprocessing[0].featureName = '0'
|
561 |
+
|
562 |
+
# Flexible input shapes
|
563 |
+
# from coremltools.models.neural_network import flexible_shape_utils
|
564 |
+
# s = [] # shapes
|
565 |
+
# s.append(flexible_shape_utils.NeuralNetworkImageSize(320, 192))
|
566 |
+
# s.append(flexible_shape_utils.NeuralNetworkImageSize(640, 384)) # (height, width)
|
567 |
+
# flexible_shape_utils.add_enumerated_image_sizes(spec, feature_name='image', sizes=s)
|
568 |
+
# r = flexible_shape_utils.NeuralNetworkImageSizeRange() # shape ranges
|
569 |
+
# r.add_height_range((192, 640))
|
570 |
+
# r.add_width_range((192, 640))
|
571 |
+
# flexible_shape_utils.update_image_size_range(spec, feature_name='image', size_range=r)
|
572 |
+
|
573 |
+
# Print
|
574 |
+
print(spec.description)
|
575 |
+
|
576 |
+
# Model from spec
|
577 |
+
model = ct.models.MLModel(spec)
|
578 |
+
|
579 |
+
# 3. Create NMS protobuf
|
580 |
+
nms_spec = ct.proto.Model_pb2.Model()
|
581 |
+
nms_spec.specificationVersion = 5
|
582 |
+
for i in range(2):
|
583 |
+
decoder_output = model._spec.description.output[i].SerializeToString()
|
584 |
+
nms_spec.description.input.add()
|
585 |
+
nms_spec.description.input[i].ParseFromString(decoder_output)
|
586 |
+
nms_spec.description.output.add()
|
587 |
+
nms_spec.description.output[i].ParseFromString(decoder_output)
|
588 |
+
|
589 |
+
nms_spec.description.output[0].name = 'confidence'
|
590 |
+
nms_spec.description.output[1].name = 'coordinates'
|
591 |
+
|
592 |
+
output_sizes = [nc, 4]
|
593 |
+
for i in range(2):
|
594 |
+
ma_type = nms_spec.description.output[i].type.multiArrayType
|
595 |
+
ma_type.shapeRange.sizeRanges.add()
|
596 |
+
ma_type.shapeRange.sizeRanges[0].lowerBound = 0
|
597 |
+
ma_type.shapeRange.sizeRanges[0].upperBound = -1
|
598 |
+
ma_type.shapeRange.sizeRanges.add()
|
599 |
+
ma_type.shapeRange.sizeRanges[1].lowerBound = output_sizes[i]
|
600 |
+
ma_type.shapeRange.sizeRanges[1].upperBound = output_sizes[i]
|
601 |
+
del ma_type.shape[:]
|
602 |
+
|
603 |
+
nms = nms_spec.nonMaximumSuppression
|
604 |
+
nms.confidenceInputFeatureName = out0.name # 1x507x80
|
605 |
+
nms.coordinatesInputFeatureName = out1.name # 1x507x4
|
606 |
+
nms.confidenceOutputFeatureName = 'confidence'
|
607 |
+
nms.coordinatesOutputFeatureName = 'coordinates'
|
608 |
+
nms.iouThresholdInputFeatureName = 'iouThreshold'
|
609 |
+
nms.confidenceThresholdInputFeatureName = 'confidenceThreshold'
|
610 |
+
nms.iouThreshold = 0.45
|
611 |
+
nms.confidenceThreshold = 0.25
|
612 |
+
nms.pickTop.perClass = True
|
613 |
+
nms.stringClassLabels.vector.extend(names.values())
|
614 |
+
nms_model = ct.models.MLModel(nms_spec)
|
615 |
+
|
616 |
+
# 4. Pipeline models together
|
617 |
+
pipeline = ct.models.pipeline.Pipeline(input_features=[('image', ct.models.datatypes.Array(3, ny, nx)),
|
618 |
+
('iouThreshold', ct.models.datatypes.Double()),
|
619 |
+
('confidenceThreshold', ct.models.datatypes.Double())],
|
620 |
+
output_features=['confidence', 'coordinates'])
|
621 |
+
pipeline.add_model(model)
|
622 |
+
pipeline.add_model(nms_model)
|
623 |
+
|
624 |
+
# Correct datatypes
|
625 |
+
pipeline.spec.description.input[0].ParseFromString(model._spec.description.input[0].SerializeToString())
|
626 |
+
pipeline.spec.description.output[0].ParseFromString(nms_model._spec.description.output[0].SerializeToString())
|
627 |
+
pipeline.spec.description.output[1].ParseFromString(nms_model._spec.description.output[1].SerializeToString())
|
628 |
+
|
629 |
+
# Update metadata
|
630 |
+
pipeline.spec.specificationVersion = 5
|
631 |
+
pipeline.spec.description.metadata.versionString = 'https://github.com/ultralytics/yolov5'
|
632 |
+
pipeline.spec.description.metadata.shortDescription = 'https://github.com/ultralytics/yolov5'
|
633 |
+
pipeline.spec.description.metadata.author = 'glenn.jocher@ultralytics.com'
|
634 |
+
pipeline.spec.description.metadata.license = 'https://github.com/ultralytics/yolov5/blob/master/LICENSE'
|
635 |
+
pipeline.spec.description.metadata.userDefined.update({
|
636 |
+
'classes': ','.join(names.values()),
|
637 |
+
'iou_threshold': str(nms.iouThreshold),
|
638 |
+
'confidence_threshold': str(nms.confidenceThreshold)})
|
639 |
+
|
640 |
+
# Save the model
|
641 |
+
f = file.with_suffix('.mlmodel') # filename
|
642 |
+
model = ct.models.MLModel(pipeline.spec)
|
643 |
+
model.input_description['image'] = 'Input image'
|
644 |
+
model.input_description['iouThreshold'] = f'(optional) IOU Threshold override (default: {nms.iouThreshold})'
|
645 |
+
model.input_description['confidenceThreshold'] = \
|
646 |
+
f'(optional) Confidence Threshold override (default: {nms.confidenceThreshold})'
|
647 |
+
model.output_description['confidence'] = 'Boxes × Class confidence (see user-defined metadata "classes")'
|
648 |
+
model.output_description['coordinates'] = 'Boxes × [x, y, width, height] (relative to image size)'
|
649 |
+
model.save(f) # pipelined
|
650 |
+
print(f'{prefix} pipeline success ({time.time() - t:.2f}s), saved as {f} ({file_size(f):.1f} MB)')
|
651 |
+
|
652 |
+
|
653 |
+
@smart_inference_mode()
|
654 |
+
def run(
|
655 |
+
data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path'
|
656 |
+
weights='yolov5s.pt', # weights path
|
657 |
+
imgsz=None, # inference size (pixels)
|
658 |
+
img=None, # inference size (pixels)
|
659 |
+
batch_size=1, # batch size
|
660 |
+
device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu
|
661 |
+
include=('torchscript', 'onnx'), # include formats
|
662 |
+
half=False, # FP16 half-precision export
|
663 |
+
inplace=False, # set YOLOv5 Detect() inplace=True
|
664 |
+
keras=False, # use Keras
|
665 |
+
optimize=False, # TorchScript: optimize for mobile
|
666 |
+
int8=False, # CoreML/TF INT8 quantization
|
667 |
+
dynamic=False, # ONNX/TF/TensorRT: dynamic axes
|
668 |
+
simplify=False, # ONNX: simplify model
|
669 |
+
opset=12, # ONNX: opset version
|
670 |
+
verbose=False, # TensorRT: verbose log
|
671 |
+
workspace=4, # TensorRT: workspace size (GB)
|
672 |
+
nms=False, # TF: add NMS to model
|
673 |
+
agnostic_nms=False, # TF: add agnostic NMS to model
|
674 |
+
topk_per_class=100, # TF.js NMS: topk per class to keep
|
675 |
+
topk_all=100, # TF.js NMS: topk for all classes to keep
|
676 |
+
iou_thres=0.45, # TF.js NMS: IoU threshold
|
677 |
+
conf_thres=0.25, # TF.js NMS: confidence threshold
|
678 |
+
):
|
679 |
+
t = time.time()
|
680 |
+
|
681 |
+
# handle fire args as: include=['torchscript,onnx,tflite']
|
682 |
+
if isinstance(include, list) and (',' in include[0]):
|
683 |
+
include = include[0].split(',')
|
684 |
+
|
685 |
+
if imgsz is None and img is None:
|
686 |
+
imgsz = (640, 640)
|
687 |
+
elif img is not None:
|
688 |
+
imgsz = img
|
689 |
+
|
690 |
+
include = [x.lower() for x in include] # to lowercase
|
691 |
+
fmts = tuple(export_formats()['Argument'][1:]) # --include arguments
|
692 |
+
flags = [x in include for x in fmts]
|
693 |
+
assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}'
|
694 |
+
jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = flags # export booleans
|
695 |
+
file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # PyTorch weights
|
696 |
+
|
697 |
+
# Load PyTorch model
|
698 |
+
device = select_device(device)
|
699 |
+
if half:
|
700 |
+
assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0'
|
701 |
+
assert not dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both'
|
702 |
+
model = attempt_load(weights, device=device, inplace=True, fuse=True) # load FP32 model
|
703 |
+
|
704 |
+
# Checks
|
705 |
+
imgsz *= 2 if len(imgsz) == 1 else 1 # expand
|
706 |
+
if optimize:
|
707 |
+
assert device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. use --device cpu'
|
708 |
+
|
709 |
+
# Input
|
710 |
+
gs = int(max(model.stride)) # grid size (max stride)
|
+    imgsz = [check_img_size(x, gs) for x in imgsz]  # verify img_size are gs-multiples
+    im = torch.zeros(batch_size, 3, *imgsz).to(device)  # image size(1,3,320,192) BCHW iDetection
+
+    # Update model
+    model.eval()
+    for k, m in model.named_modules():
+        if isinstance(m, Detect):
+            m.inplace = inplace
+            m.dynamic = dynamic
+            m.export = True
+
+    for _ in range(2):
+        y = model(im)  # dry runs
+    if half and not coreml:
+        im, model = im.half(), model.half()  # to FP16
+    shape = tuple((y[0] if isinstance(y, tuple) else y).shape)  # model output shape
+    metadata = {'stride': int(max(model.stride)), 'names': model.names}  # model metadata
+    LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)")
+
+    # Exports
+    f = [''] * len(fmts)  # exported filenames
+    warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning)  # suppress TracerWarning
+    if jit:  # TorchScript
+        f[0], _ = export_torchscript(model, im, file, optimize)
+    if engine:  # TensorRT required before ONNX
+        f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose)
+    if onnx or xml:  # OpenVINO requires ONNX
+        f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify)
+    if xml:  # OpenVINO
+        f[3], _ = export_openvino(file, metadata, half)
+    if coreml:  # CoreML
+        f[4], ct_model = export_coreml(model, im, file, int8, half, nms)
+        if nms:
+            pipeline_coreml(ct_model, im, file, model.names, y)
+    if any((saved_model, pb, tflite, edgetpu, tfjs)):  # TensorFlow formats
+        assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.'
+        assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.'
+        f[5], s_model = export_saved_model(model.cpu(),
+                                           im,
+                                           file,
+                                           dynamic,
+                                           tf_nms=nms or agnostic_nms or tfjs,
+                                           agnostic_nms=agnostic_nms or tfjs,
+                                           topk_per_class=topk_per_class,
+                                           topk_all=topk_all,
+                                           iou_thres=iou_thres,
+                                           conf_thres=conf_thres,
+                                           keras=keras)
+        if pb or tfjs:  # pb prerequisite to tfjs
+            f[6], _ = export_pb(s_model, file)
+        if tflite or edgetpu:
+            f[7], _ = export_tflite(s_model, im, file, int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms)
+            if edgetpu:
+                f[8], _ = export_edgetpu(file)
+            add_tflite_metadata(f[8] or f[7], metadata, num_outputs=len(s_model.outputs))
+        if tfjs:
+            f[9], _ = export_tfjs(file, int8)
+    if paddle:  # PaddlePaddle
+        f[10], _ = export_paddle(model, im, file, metadata)
+
+    # Finish
+    f = [str(x) for x in f if x]  # filter out '' and None
+    if any(f):
+        cls, det, seg = (isinstance(model, x) for x in (ClassificationModel, DetectionModel, SegmentationModel))  # type
+        det &= not seg  # segmentation models inherit from SegmentationModel(DetectionModel)
+        dir = Path('segment' if seg else 'classify' if cls else '')
+        h = '--half' if half else ''  # --half FP16 inference arg
+        s = '# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference' if cls else \
+            '# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference' if seg else ''
+        LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)'
+                    f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
+                    f"\nDetect:    yolov5 {'detect' if det else 'predict'} --weights {f[-1]} {h}"
+                    f"\nValidate:  yolov5 val --weights {f[-1]} {h}"
+                    f"\nPython:    model = yolov5.load('{f[-1]}')  {s}"
+                    f'\nVisualize: https://netron.app')
+    return f  # return list of exported files/dirs
+
+
+def parse_opt(known=False):
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
+    parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
+    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)')
+    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
+    parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--half', action='store_true', help='FP16 half-precision export')
+    parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True')
+    parser.add_argument('--keras', action='store_true', help='TF: use Keras')
+    parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile')
+    parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization')
+    parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes')
+    parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model')
+    parser.add_argument('--opset', type=int, default=17, help='ONNX: opset version')
+    parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log')
+    parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)')
+    parser.add_argument('--nms', action='store_true', help='TF: add NMS to model')
+    parser.add_argument('--agnostic-nms', action='store_true', help='TF: add agnostic NMS to model')
+    parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep')
+    parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep')
+    parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold')
+    parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold')
+    parser.add_argument(
+        '--include',
+        nargs='+',
+        default=['torchscript'],
+        help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle')
+    opt = parser.parse_known_args()[0] if known else parser.parse_args()
+    print_args(vars(opt))
+    return opt
+
+
+def main():
+    opt = parse_opt()
+    for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]):
+        run(**vars(opt))
+
+
+if __name__ == "__main__":
+    main()
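For orientation, everything under `# Exports` above is driven by the CLI that `parse_opt()` defines, and `main()` simply forwards the parsed namespace into `run()`. A minimal programmatic sketch of the same flow (assumptions: `infer.yolov5.export` is importable from the Space root, and the weight path is the `my_model/v5-n.pt` file this Space bundles):

```python
# Sketch only: drive the exporter from Python instead of the CLI.
# parse_opt(known=True) fills in the same defaults as the argparse help above,
# so any field can be overridden before calling run().
from infer.yolov5 import export

opt = export.parse_opt(known=True)   # tolerate unknown argv, e.g. inside a notebook
opt.weights = 'my_model/v5-n.pt'     # assumed: the weights this Space bundles
opt.include = ['onnx']               # any of the formats listed under --include
opt.imgsz = [320, 320]               # match the 320 px inference size used elsewhere
exported = export.run(**vars(opt))   # same call main() makes; returns exported paths
```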
infer/yolov5/get_results.py
CHANGED
@@ -2,31 +2,92 @@ import torch
 import cv2
 import numpy as np
 import streamlit as st
+import os
+import shutil
+
+from infer.yolov5 import detect

 colors = [(np.random.randint(0, 255), np.random.randint(0, 255), np.random.randint(0, 255)) for _ in range(20)]

 def get_yolov5_result(image, conf_threshold, iou_threshold, class_names):
     weights = 'my_model/v5-n.pt'
     device = 'cpu'
-    model = torch.hub.load('ultralytics/yolov5', 'custom', path=weights, force_reload=True)
-
-    model.to(device)
-    model.eval()
-    results = model(image, size=320)
-    st.write(results.pandas().xyxy[0])
+    #model = torch.hub.load('ultralytics/yolov5', 'custom', path=weights, force_reload=True)
+    cv2.imwrite('temp.png', image)
+    detect.run(source='temp.png', weights=weights, conf_thres=conf_threshold, iou_thres=iou_threshold,
+               imgsz=320, project='', name='temp', data='infer/yolov5/lung_en.yaml', save_txt=True, save_conf=True)

+    #img0 = cv2.imread('temp/temp.png')
     img0 = image.copy()
+
+    # class_names = ['epiglottis', 'vocal cord', 'trachea', 'carina', 'right main bronchus', 'intermediate bronchus',
+    #                'right upper lobar bronchus', 'right middle lobar bronchus', 'right lower lobar bronchus',
+    #                'right superior segment bronchus', 'right basal bronchus', 'left main bronchus', 'left upper lobar bronchus',
+    #                'left division bronchus', 'left lingular bronchus', 'left lower bronchus', 'left superior segment',
+    #                'left basal bronchus']
+
+    # model.to(device)
+    # model.eval()
+
+    # results = model(image, size=320)
+    #st.write(results.pandas().xyxy[0])
+
+    # img0 = image.copy()
+    # result_list = []
+    #
+    # for i, det in enumerate(results.xyxy[0]):
+    #     if det[4] > conf_threshold:
+    #         label = class_names[int(det[5]) - 1]
+    #         color = colors[int(det[5]) % len(colors)]
+    #         result_list.append([label, color, det[4]])
+    #         cv2.rectangle(img0, (int(det[0]), int(det[1])), (int(det[2]), int(det[3])), color, 2)
+    #         cv2.putText(img0, label, (int(det[0]), int(det[1]) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
+    #

     result_list = []
-
-    for i, det in enumerate(results.xyxy[0]):
-        if det[4] > conf_threshold:
-            label = class_names[int(det[5]) - 1]
-            color = colors[int(det[5]) % len(colors)]
-            result_list.append([label, color, det[4]])
-            cv2.rectangle(img0, (int(det[0]), int(det[1])), (int(det[2]), int(det[3])), color, 2)
-            cv2.putText(img0, label, (int(det[0]), int(det[1]) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
+    with open('temp/labels/temp.txt', "r") as f:
+        lines = f.readlines()
+        for line in lines:
+            line = line.split(" ")
+            x_center, y_center, width, height = map(float, line[1:5])
+
+            img_height, img_width = img0.shape[:2]
+            x_center *= img_width
+            y_center *= img_height
+            width *= img_width
+            height *= img_height
+
+            x1 = int(x_center - width / 2)
+            y1 = int(y_center - height / 2)
+            x2 = int(x_center + width / 2)
+            y2 = int(y_center + height / 2)
+            label = f'{class_names[int(line[0])]} {float(line[5]):.2f}'
+            color = colors[int(line[0]) % len(colors)]
+            result_list.append([class_names[int(line[0])], color, float(line[5])])
+            cv2.rectangle(img0, (x1, y1), (x2, y2), color, 2)
+            cv2.putText(img0, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

-    img0 = img0[...,::-1]
+    #st.write(result_list)
+    # return img0, result_list
+    img0 = img0[..., ::-1]
+
+    remove_yolov5_temp_folders()

     return img0, result_list

+def remove_yolov5_temp_folders():
+    try:
+        cwd = os.getcwd()
+        temp_folders = [d for d in os.listdir(cwd) if os.path.isdir(os.path.join(cwd, d)) and d.startswith('temp')]
+        for folder in temp_folders:
+            shutil.rmtree(os.path.join(cwd, folder), ignore_errors=True)
+
+        temp_png_path = os.path.join(cwd, 'temp.png')
+        if os.path.exists(temp_png_path):
+            os.remove(temp_png_path)
+
+        return True
+    except Exception as e:
+        print(f'Error removing temp folders: {e}')
+        return False
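The rewritten `get_yolov5_result` round-trips through `detect.run(..., save_txt=True, save_conf=True)`, whose label files store one detection per line as `class x_center y_center width height confidence`, with coordinates normalized to the image size. A small worked example of the same normalized-to-pixel conversion done in the loop above (the values are hypothetical, purely for illustration):

```python
# Worked example of the conversion in get_yolov5_result (hypothetical values).
line = '3 0.5 0.5 0.2 0.1 0.87'.split(' ')       # class 3, centered box, confidence 0.87
img_height, img_width = 240, 320                 # assume a 320x240 image

x_center, y_center, width, height = map(float, line[1:5])
x1 = int((x_center - width / 2) * img_width)     # 128
y1 = int((y_center - height / 2) * img_height)   # 108
x2 = int((x_center + width / 2) * img_width)     # 192
y2 = int((y_center + height / 2) * img_height)   # 132
print((x1, y1, x2, y2), float(line[5]))          # (128, 108, 192, 132) 0.87
```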
infer/yolov5/helpers.py
ADDED
@@ -0,0 +1,463 @@
+from pathlib import Path
+import warnings
+
+from infer.yolov5.models.common import AutoShape, DetectMultiBackend
+from infer.yolov5.models.experimental import attempt_load
+from infer.yolov5.models.yolo import ClassificationModel, SegmentationModel
+from infer.yolov5.utils.general import LOGGER, logging
+from infer.yolov5.utils.torch_utils import select_device
+
+
+def load_model(
+    model_path, device=None, autoshape=True, verbose=False, hf_token: str = None
+):
+    """
+    Creates a specified YOLOv5 model
+
+    Arguments:
+        model_path (str): path of the model
+        device (str): select device that model will be loaded (cpu, cuda)
+        pretrained (bool): load pretrained weights into the model
+        autoshape (bool): make model ready for inference
+        verbose (bool): if False, yolov5 logs will be silent
+        hf_token (str): huggingface read token for private models
+
+    Returns:
+        pytorch model
+
+    (Adapted from yolov5.hubconf.create)
+    """
+    # set logging
+    if not verbose:
+        LOGGER.setLevel(logging.WARNING)
+
+    # set device
+    device = select_device(device)
+
+    try:
+        model = DetectMultiBackend(
+            model_path, device=device, fuse=autoshape, hf_token=hf_token
+        )  # detection model
+        if autoshape:
+            if model.pt and isinstance(model.model, ClassificationModel):
+                LOGGER.warning(
+                    "WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. "
+                    "You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224)."
+                )
+            elif model.pt and isinstance(model.model, SegmentationModel):
+                LOGGER.warning(
+                    "WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. "
+                    "You will not be able to run inference with this model."
+                )
+            else:
+                try:
+                    model = AutoShape(model)  # for file/URI/PIL/cv2/np inputs and NMS
+                except Exception as e:
+                    LOGGER.warning(f"WARNING ⚠️ autoshape failed: {e}")
+    except Exception as e:
+        LOGGER.warning(f"WARNING ⚠️ DetectMultiBackend failed: {e}")
+        model = attempt_load(model_path, device=device, fuse=False)  # arbitrary model
+
+    if not verbose:
+        LOGGER.setLevel(logging.INFO)  # reset to default
+
+    return model.to(device)
+
+
+class YOLOv5:
+    def __init__(self, model_path, device=None, load_on_init=True):
+        warnings.warn("YOLOv5 class is deprecated and will be removed in future release. Use 'model = yolov5.load()' instead.", DeprecationWarning)
+
+        self.model_path = model_path
+        self.device = device
+        if load_on_init:
+            Path(model_path).parents[0].mkdir(parents=True, exist_ok=True)
+            self.model = load_model(
+                model_path=model_path, device=device, autoshape=True
+            )
+        else:
+            self.model = None
+
+    def load_model(self):
+        """
+        Load yolov5 weight.
+        """
+        Path(self.model_path).parents[0].mkdir(parents=True, exist_ok=True)
+        self.model = load_model(
+            model_path=self.model_path, device=self.device, autoshape=True
+        )
+
+    def predict(self, image_list, size=640, augment=False):
+        """
+        Perform yolov5 prediction using loaded model weights.
+
+        Returns results as a yolov5.models.common.Detections object.
+        """
+        assert self.model is not None, "before predict, you need to call .load_model()"
+        results = self.model(ims=image_list, size=size, augment=augment)
+        return results
+
+
+def generate_model_usage_markdown(
+    repo_id, ap50, task="object-detection", input_size=640, dataset_id=None
+):
+    from yolov5 import __version__ as yolov5_version
+
+    if dataset_id is not None:
+        datasets_str_1 = f"""
+datasets:
+- {dataset_id}
+"""
+        datasets_str_2 = f"""
+      dataset:
+        type: {dataset_id}
+        name: {dataset_id}
+        split: validation
+"""
+    else:
+        datasets_str_1 = datasets_str_2 = ""
+    return f"""
+---
+tags:
+- yolov5
+- yolo
+- vision
+- {task}
+- pytorch
+library_name: yolov5
+library_version: {yolov5_version}
+inference: false
+{datasets_str_1}
+model-index:
+- name: {repo_id}
+  results:
+  - task:
+      type: {task}
+{datasets_str_2}
+    metrics:
+      - type: precision  # since mAP@0.5 is not available on hf.co/metrics
+        value: {ap50}  # min: 0.0 - max: 1.0
+        name: mAP@0.5
+---
+
+<div align="center">
+  <img width="640" alt="{repo_id}" src="https://huggingface.co/{repo_id}/resolve/main/sample_visuals.jpg">
+</div>
+
+### How to use
+
+- Install [yolov5](https://github.com/fcakyon/yolov5-pip):
+
+```bash
+pip install -U yolov5
+```
+
+- Load model and perform prediction:
+
+```python
+import yolov5
+
+# load model
+model = yolov5.load('{repo_id}')
+
+# set model parameters
+model.conf = 0.25  # NMS confidence threshold
+model.iou = 0.45  # NMS IoU threshold
+model.agnostic = False  # NMS class-agnostic
+model.multi_label = False  # NMS multiple labels per box
+model.max_det = 1000  # maximum number of detections per image
+
+# set image
+img = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg'
+
+# perform inference
+results = model(img, size={input_size})
+
+# inference with test time augmentation
+results = model(img, augment=True)
+
+# parse results
+predictions = results.pred[0]
+boxes = predictions[:, :4]  # x1, y1, x2, y2
+scores = predictions[:, 4]
+categories = predictions[:, 5]
+
+# show detection bounding boxes on image
+results.show()
+
+# save results into "results/" folder
+results.save(save_dir='results/')
+```
+
+- Finetune the model on your custom dataset:
+
+```bash
+yolov5 train --data data.yaml --img {input_size} --batch 16 --weights {repo_id} --epochs 10
+```
+
+**More models available at: [awesome-yolov5-models](https://github.com/keremberke/awesome-yolov5-models)**
+"""
+
+
+def push_model_card_to_hfhub(
+    repo_id,
+    exp_folder,
+    ap50,
+    hf_token=None,
+    input_size=640,
+    task="object-detection",
+    private=False,
+    dataset_id=None,
+):
+    from huggingface_hub import upload_file, create_repo
+
+    create_repo(
+        repo_id=repo_id,
+        token=hf_token,
+        private=private,
+        exist_ok=True,
+    )
+
+    # upload sample visual to the repo
+    sample_visual_path = Path(exp_folder) / "val_batch0_labels.jpg"
+    upload_file(
+        repo_id=repo_id,
+        path_or_fileobj=str(sample_visual_path),
+        path_in_repo="sample_visuals.jpg",
+        commit_message="upload sample visuals",
+        token=hf_token,
+        repo_type="model",
+    )
+
+    # Create model card
+    modelcard_markdown = generate_model_usage_markdown(
+        repo_id,
+        task=task,
+        input_size=input_size,
+        dataset_id=dataset_id,
+        ap50=ap50,
+    )
+    modelcard_path = Path(exp_folder) / "README.md"
+    with open(modelcard_path, "w") as file_object:
+        file_object.write(modelcard_markdown)
+    upload_file(
+        repo_id=repo_id,
+        path_or_fileobj=str(modelcard_path),
+        path_in_repo=Path(modelcard_path).name,
+        commit_message="Add yolov5 model card",
+        token=hf_token,
+        repo_type="model",
+    )
+
+
+def push_config_to_hfhub(
+    repo_id,
+    exp_folder,
+    best_ap50=None,
+    input_size=640,
+    task="object-detection",
+    hf_token=None,
+    private=False,
+):
+    """
+    Pushes a yolov5 config to huggingface hub
+
+    Arguments:
+        repo_id (str): The name of the repository to create on huggingface.co
+        exp_folder (str): The path to the experiment folder
+        best_ap50 (float): The best ap50 score of the model
+        input_size (int): The input size of the model (default: 640)
+        task (str): The task of the model (default: object-detection)
+        hf_token (str): The huggingface token to use to push the model
+        private (bool): Whether the model should be private or not
+    """
+    from huggingface_hub import upload_file, create_repo
+    import json
+
+    config = {"input_size": input_size, "task": task, "best_ap50": best_ap50}
+    config_path = Path(exp_folder) / "config.json"
+    with open(config_path, "w") as file_object:
+        json.dump(config, file_object)
+
+    create_repo(
+        repo_id=repo_id,
+        token=hf_token,
+        private=private,
+        exist_ok=True,
+    )
+    upload_file(
+        repo_id=repo_id,
+        path_or_fileobj=str(config_path),
+        path_in_repo=Path(config_path).name,
+        commit_message="Add yolov5 config",
+        token=hf_token,
+        repo_type="model",
+    )
+
+
+def push_model_to_hfhub(repo_id, exp_folder, hf_token=None, private=False):
+    """
+    Pushes a yolov5 model to huggingface hub
+
+    Arguments:
+        repo_id (str): huggingface repo id to be uploaded to
+        exp_folder (str): yolov5 experiment folder path
+        hf_token (str): huggingface write token
+        private (bool): whether to make the repo private or not
+    """
+    from huggingface_hub import upload_file, create_repo
+    from glob import glob
+
+    best_model_path = Path(exp_folder) / "weights/best.pt"
+    tensorboard_log_path = glob(f"{exp_folder}/events.out.tfevents*")[-1]
+
+    create_repo(
+        repo_id=repo_id,
+        token=hf_token,
+        private=private,
+        exist_ok=True,
+    )
+    upload_file(
+        repo_id=repo_id,
+        path_or_fileobj=str(best_model_path),
+        path_in_repo=Path(best_model_path).name,
+        commit_message="Upload yolov5 best model",
+        token=hf_token,
+        repo_type="model",
+    )
+    upload_file(
+        repo_id=repo_id,
+        path_or_fileobj=str(tensorboard_log_path),
+        path_in_repo=Path(tensorboard_log_path).name,
+        commit_message="Upload yolov5 tensorboard logs",
+        token=hf_token,
+        repo_type="model",
+    )
+
+
+def push_to_hfhub(
+    hf_model_id,
+    hf_token,
+    hf_private,
+    save_dir,
+    hf_dataset_id=None,
+    input_size=640,
+    best_ap50=None,
+    task="object-detection",
+):
+    from yolov5.utils.general import colorstr
+    from yolov5.helpers import (
+        push_config_to_hfhub,
+        push_model_card_to_hfhub,
+        push_model_to_hfhub,
+    )
+
+    LOGGER.info(f"{colorstr('hub:')} Pushing to hf.co/{hf_model_id}")
+
+    push_config_to_hfhub(
+        repo_id=hf_model_id,
+        exp_folder=save_dir,
+        best_ap50=best_ap50,
+        input_size=input_size,
+        task=task,
+        hf_token=hf_token,
+        private=hf_private,
+    )
+    push_model_card_to_hfhub(
+        repo_id=hf_model_id,
+        exp_folder=save_dir,
+        input_size=input_size,
+        task=task,
+        hf_token=hf_token,
+        private=hf_private,
+        dataset_id=hf_dataset_id,
+        ap50=best_ap50,
+    )
+    push_model_to_hfhub(
+        repo_id=hf_model_id, exp_folder=save_dir, hf_token=hf_token, private=hf_private
+    )
+
+
+def convert_coco_dataset_to_yolo(opt, save_dir):
+    import yaml
+    from shutil import copyfile
+
+    is_coco_data = False
+    has_yolo_s3_data_dir = False
+    with open(opt.data, errors="ignore") as f:
+        data_info = yaml.safe_load(f)  # load data dict
+        if data_info.get("train_json_path") is not None:
+            is_coco_data = True
+        if data_info.get("yolo_s3_data_dir") is not None:
+            has_yolo_s3_data_dir = True
+
+    if has_yolo_s3_data_dir and opt.upload_dataset:
+        raise ValueError(
+            "'--upload_dataset' argument cannot be passed when 'yolo_s3_data_dir' field is not empty in 'data.yaml'."
+        )
+
+    if is_coco_data:
+        from sahi.utils.coco import export_coco_as_yolov5_via_yml
+        from yolov5.utils.general import is_colab
+
+        disable_symlink = False
+        if is_colab():
+            disable_symlink = True
+
+        data = export_coco_as_yolov5_via_yml(
+            yml_path=opt.data, output_dir=save_dir / "data", disable_symlink=disable_symlink
+        )
+        opt.data = data
+
+        # add coco fields to data.yaml
+        with open(data, errors="ignore") as f:
+            updated_data_info = yaml.safe_load(f)  # load data dict
+        updated_data_info["train_json_path"] = data_info["train_json_path"]
+        updated_data_info["val_json_path"] = data_info["val_json_path"]
+        updated_data_info["train_image_dir"] = data_info["train_image_dir"]
+        updated_data_info["val_image_dir"] = data_info["val_image_dir"]
+        if data_info.get("yolo_s3_data_dir") is not None:
+            updated_data_info["yolo_s3_data_dir"] = data_info["yolo_s3_data_dir"]
+        if data_info.get("coco_s3_data_dir") is not None:
+            updated_data_info["coco_s3_data_dir"] = data_info["coco_s3_data_dir"]
+        with open(data, "w") as f:
+            yaml.dump(updated_data_info, f)
+
+        w = save_dir / "data" / "coco"  # coco dir
+        w.mkdir(parents=True, exist_ok=True)  # make dir
+
+        # copy train.json/val.json and coco_data.yml into data/coco/ folder
+        if (
+            "train_json_path" in data_info
+            and Path(data_info["train_json_path"]).is_file()
+        ):
+            copyfile(data_info["train_json_path"], str(w / "train.json"))
+        if "val_json_path" in data_info and Path(data_info["val_json_path"]).is_file():
+            copyfile(data_info["val_json_path"], str(w / "val.json"))
+
+
+def upload_to_s3(opt, data, save_dir):
+    import yaml
+    import os
+    from yolov5.utils.general import colorstr
+    from yolov5.utils.aws import upload_file_to_s3, upload_folder_to_s3
+
+    with open(data, errors="ignore") as f:
+        data_info = yaml.safe_load(f)  # load data dict
+    # upload yolo formatted data to s3
+    s3_folder = "s3://" + str(
+        Path(opt.s3_upload_dir.replace("s3://", "")) / save_dir.name / "data"
+    ).replace(os.sep, "/")
+    LOGGER.info(f"{colorstr('aws:')} Uploading yolo formatted dataset to {s3_folder}")
+    s3_file = s3_folder + "/data.yaml"
+    result = upload_file_to_s3(local_file=opt.data, s3_file=s3_file)
+    s3_folder_train = s3_folder + "/train/"
+    result = upload_folder_to_s3(
+        local_folder=data_info["train"], s3_folder=s3_folder_train
+    )
+    s3_folder_val = s3_folder + "/val/"
+    result = upload_folder_to_s3(local_folder=data_info["val"], s3_folder=s3_folder_val)
+    if result:
+        LOGGER.info(
+            f"{colorstr('aws:')} Dataset has been successfully uploaded to {s3_folder}"
+        )
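`load_model` above is the package-style entry point mirroring `hubconf._create`. A minimal usage sketch (assuming the Space's bundled weights at `my_model/v5-n.pt` and an image file on disk):

```python
# Sketch only: load the bundled detector via helpers.load_model and run it once.
from infer.yolov5.helpers import load_model

model = load_model('my_model/v5-n.pt', device='cpu')  # AutoShape-wrapped DetectMultiBackend
results = model('temp.png', size=320)                 # AutoShape accepts file/URI/PIL/cv2/np inputs
results.print()                                       # per-image detection summary
```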
infer/yolov5/hubconf.py
ADDED
@@ -0,0 +1,169 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5
+
+Usage:
+    import torch
+    model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # official model
+    model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s')  # from branch
+    model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt')  # custom/local model
+    model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local')  # local repo
+"""
+
+import torch
+
+
+def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
+    """Creates or loads a YOLOv5 model
+
+    Arguments:
+        name (str): model name 'yolov5s' or path 'path/to/best.pt'
+        pretrained (bool): load pretrained weights into the model
+        channels (int): number of input channels
+        classes (int): number of model classes
+        autoshape (bool): apply YOLOv5 .autoshape() wrapper to model
+        verbose (bool): print all information to screen
+        device (str, torch.device, None): device to use for model parameters
+
+    Returns:
+        YOLOv5 model
+    """
+    from pathlib import Path
+
+    from infer.yolov5.models.common import AutoShape, DetectMultiBackend
+    from infer.yolov5.models.experimental import attempt_load
+    from infer.yolov5.models.yolo import ClassificationModel, DetectionModel, SegmentationModel
+    from infer.yolov5.utils.downloads import attempt_download
+    from infer.yolov5.utils.general import LOGGER, check_requirements, intersect_dicts, logging
+    from infer.yolov5.utils.torch_utils import select_device
+
+    if not verbose:
+        LOGGER.setLevel(logging.WARNING)
+    check_requirements(exclude=('opencv-python', 'tensorboard', 'thop'))
+    name = Path(name)
+    path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name  # checkpoint path
+    try:
+        device = select_device(device)
+        if pretrained and channels == 3 and classes == 80:
+            try:
+                model = DetectMultiBackend(path, device=device, fuse=autoshape)  # detection model
+                if autoshape:
+                    if model.pt and isinstance(model.model, ClassificationModel):
+                        LOGGER.warning('WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. '
+                                       'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).')
+                    elif model.pt and isinstance(model.model, SegmentationModel):
+                        LOGGER.warning('WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. '
+                                       'You will not be able to run inference with this model.')
+                    else:
+                        model = AutoShape(model)  # for file/URI/PIL/cv2/np inputs and NMS
+            except Exception:
+                model = attempt_load(path, device=device, fuse=False)  # arbitrary model
+        else:
+            cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0]  # model.yaml path
+            model = DetectionModel(cfg, channels, classes)  # create model
+            if pretrained:
+                ckpt = torch.load(attempt_download(path), map_location=device)  # load
+                csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
+                csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors'])  # intersect
+                model.load_state_dict(csd, strict=False)  # load
+                if len(ckpt['model'].names) == classes:
+                    model.names = ckpt['model'].names  # set class names attribute
+        if not verbose:
+            LOGGER.setLevel(logging.INFO)  # reset to default
+        return model.to(device)
+
+    except Exception as e:
+        help_url = 'https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading'
+        s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.'
+        raise Exception(s) from e
+
+
+def custom(path='path/to/model.pt', autoshape=True, _verbose=True, device=None):
+    # YOLOv5 custom or local model
+    return _create(path, autoshape=autoshape, verbose=_verbose, device=device)
+
+
+def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+    # YOLOv5-nano model https://github.com/ultralytics/yolov5
+    return _create('yolov5n', pretrained, channels, classes, autoshape, _verbose, device)
+
+
+def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+    # YOLOv5-small model https://github.com/ultralytics/yolov5
+    return _create('yolov5s', pretrained, channels, classes, autoshape, _verbose, device)
+
+
+def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+    # YOLOv5-medium model https://github.com/ultralytics/yolov5
+    return _create('yolov5m', pretrained, channels, classes, autoshape, _verbose, device)
+
+
+def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+    # YOLOv5-large model https://github.com/ultralytics/yolov5
+    return _create('yolov5l', pretrained, channels, classes, autoshape, _verbose, device)
+
+
+def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+    # YOLOv5-xlarge model https://github.com/ultralytics/yolov5
+    return _create('yolov5x', pretrained, channels, classes, autoshape, _verbose, device)
+
+
+def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+    # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5
+    return _create('yolov5n6', pretrained, channels, classes, autoshape, _verbose, device)
+
+
+def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+    # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5
+    return _create('yolov5s6', pretrained, channels, classes, autoshape, _verbose, device)
+
+
+def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+    # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5
+    return _create('yolov5m6', pretrained, channels, classes, autoshape, _verbose, device)
+
+
+def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+    # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5
+    return _create('yolov5l6', pretrained, channels, classes, autoshape, _verbose, device)
+
+
+def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+    # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5
+    return _create('yolov5x6', pretrained, channels, classes, autoshape, _verbose, device)
+
+
+if __name__ == '__main__':
+    import argparse
+    from pathlib import Path
+
+    import numpy as np
+    from PIL import Image
+
+    from yolov5.utils.general import cv2, print_args
+
+    # Argparser
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--model', type=str, default='yolov5s', help='model name')
+    opt = parser.parse_args()
+    print_args(vars(opt))
+
+    # Model
+    model = _create(name=opt.model, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True)
+    # model = custom(path='path/to/model.pt')  # custom
+
+    # Images
+    imgs = [
+        'data/images/zidane.jpg',  # filename
+        Path('data/images/zidane.jpg'),  # Path
+        'https://ultralytics.com/images/zidane.jpg',  # URI
+        cv2.imread('data/images/bus.jpg')[:, :, ::-1],  # OpenCV
+        Image.open('data/images/bus.jpg'),  # PIL
+        np.zeros((320, 640, 3))]  # numpy
+
+    # Inference
+    results = model(imgs, size=320)  # batched inference
+
+    # Results
+    results.print()
+    results.save()
infer/yolov5/lung_en.yaml
ADDED
@@ -0,0 +1,28 @@
+# COCO 2017 dataset http://cocodataset.org - first 128 training images
+# Train command: python train.py --data coco128.yaml
+# Default dataset location is next to YOLOv5:
+#   /parent_folder
+#     /coco128
+#     /yolov5
+
+
+# download command/URL (optional)
+#download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip
+
+# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
+#train: data/lung/images/train/
+#val: data/lung/images/val/
+
+# number of classes
+nc: 18
+
+# class names
+
+names: ['Epiglottis', 'Vocal Fold', 'Trachea', 'Left Main Bronchus', 'Carina', 'Right Main Bronchus', 'Left Upper Lobar Bronchus',
+        'Left Lower Bronchus', 'Right Upper Lobar Bronchus', 'Intermediate Bronchus', 'Right Lower Lobar Bronchus',
+        'Left Divsion Bronchus', 'Left Lingular Bronchus', 'Left Superior Segment',
+        'Left Basal Bronchus', 'Right Middle Lobar Bronchus', 'Right Basal Bronchus', 'Right Superior Segment Bronchus']
+#names: ['epiglottis', 'vocal cord', 'trachea', 'carina', 'right main bronchus', 'intermediate bronchus',
+#        'right upper lobar bronchus', 'right middle lobar bronchus', 'right lower lobar bronchus', 'right superior segment bronchus',
+#        'right basal bronchus', 'left main bronchus', 'left upper lobar bronchus', 'left division bronchus',
+#        'left lingular bronchus', 'left lower bronchus', 'left superior segment', 'left basal bronchus']
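Since YOLOv5 indexes labels by their position in `names`, the list order here must stay in sync with `nc` and with the class list the app passes into `get_yolov5_result`. A quick sanity-check sketch (it only assumes the yaml path added in this commit):

```python
# Sanity-check sketch: nc must equal the number of entries in names.
import yaml

with open('infer/yolov5/lung_en.yaml', errors='ignore') as f:
    data = yaml.safe_load(f)
assert data['nc'] == len(data['names']), f"nc={data['nc']} vs {len(data['names'])} names"
```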
infer/yolov5/models/__init__.py
ADDED
File without changes
infer/yolov5/models/common.py
ADDED
@@ -0,0 +1,878 @@
1 |
+
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
2 |
+
"""
|
3 |
+
Common modules
|
4 |
+
"""
|
5 |
+
|
6 |
+
import ast
|
7 |
+
import contextlib
|
8 |
+
import json
|
9 |
+
import math
|
10 |
+
import platform
|
11 |
+
import warnings
|
12 |
+
import zipfile
|
13 |
+
from collections import OrderedDict, namedtuple
|
14 |
+
from copy import copy
|
15 |
+
from pathlib import Path
|
16 |
+
from urllib.parse import urlparse
|
17 |
+
|
18 |
+
import cv2
|
19 |
+
import numpy as np
|
20 |
+
import pandas as pd
|
21 |
+
import requests
|
22 |
+
import torch
|
23 |
+
import torch.nn as nn
|
24 |
+
from PIL import Image
|
25 |
+
from torch.cuda import amp
|
26 |
+
|
27 |
+
from infer.yolov5.utils import TryExcept
|
28 |
+
from infer.yolov5.utils.dataloaders import exif_transpose, letterbox
|
29 |
+
from infer.yolov5.utils.downloads import attempt_download_from_hub
|
30 |
+
from infer.yolov5.utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr,
|
31 |
+
increment_path, is_jupyter, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy,
|
32 |
+
xyxy2xywh, yaml_load)
|
33 |
+
from infer.yolov5.utils.plots import Annotator, colors, save_one_box
|
34 |
+
from infer.yolov5.utils.torch_utils import copy_attr, smart_inference_mode
|
35 |
+
|
36 |
+
|
37 |
+
def autopad(k, p=None, d=1): # kernel, padding, dilation
|
38 |
+
# Pad to 'same' shape outputs
|
39 |
+
if d > 1:
|
40 |
+
k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size
|
41 |
+
if p is None:
|
42 |
+
p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
|
43 |
+
return p
|
44 |
+
|
45 |
+
|
46 |
+
class Conv(nn.Module):
|
47 |
+
# Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation)
|
48 |
+
default_act = nn.SiLU() # default activation
|
49 |
+
|
50 |
+
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
|
51 |
+
super().__init__()
|
52 |
+
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
|
53 |
+
self.bn = nn.BatchNorm2d(c2)
|
54 |
+
self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()
|
55 |
+
|
56 |
+
def forward(self, x):
|
57 |
+
return self.act(self.bn(self.conv(x)))
|
58 |
+
|
59 |
+
def forward_fuse(self, x):
|
60 |
+
return self.act(self.conv(x))
|
61 |
+
|
62 |
+
|
63 |
+
class DWConv(Conv):
|
64 |
+
# Depth-wise convolution
|
65 |
+
def __init__(self, c1, c2, k=1, s=1, d=1, act=True): # ch_in, ch_out, kernel, stride, dilation, activation
|
66 |
+
super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act)
|
67 |
+
|
68 |
+
|
69 |
+
class DWConvTranspose2d(nn.ConvTranspose2d):
|
70 |
+
# Depth-wise transpose convolution
|
71 |
+
def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): # ch_in, ch_out, kernel, stride, padding, padding_out
|
72 |
+
super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2))
|
73 |
+
|
74 |
+
|
75 |
+
class TransformerLayer(nn.Module):
|
76 |
+
# Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)
|
77 |
+
def __init__(self, c, num_heads):
|
78 |
+
super().__init__()
|
79 |
+
self.q = nn.Linear(c, c, bias=False)
|
80 |
+
self.k = nn.Linear(c, c, bias=False)
|
81 |
+
self.v = nn.Linear(c, c, bias=False)
|
82 |
+
self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
|
83 |
+
self.fc1 = nn.Linear(c, c, bias=False)
|
84 |
+
self.fc2 = nn.Linear(c, c, bias=False)
|
85 |
+
|
86 |
+
def forward(self, x):
|
87 |
+
x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
|
88 |
+
x = self.fc2(self.fc1(x)) + x
|
89 |
+
return x
|
90 |
+
|
91 |
+
|
92 |
+
class TransformerBlock(nn.Module):
|
93 |
+
# Vision Transformer https://arxiv.org/abs/2010.11929
|
94 |
+
def __init__(self, c1, c2, num_heads, num_layers):
|
95 |
+
super().__init__()
|
96 |
+
self.conv = None
|
97 |
+
if c1 != c2:
|
98 |
+
self.conv = Conv(c1, c2)
|
99 |
+
self.linear = nn.Linear(c2, c2) # learnable position embedding
|
100 |
+
self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))
|
101 |
+
self.c2 = c2
|
102 |
+
|
103 |
+
def forward(self, x):
|
104 |
+
if self.conv is not None:
|
105 |
+
x = self.conv(x)
|
106 |
+
b, _, w, h = x.shape
|
107 |
+
p = x.flatten(2).permute(2, 0, 1)
|
108 |
+
return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h)
|
109 |
+
|
110 |
+
|
111 |
+
class Bottleneck(nn.Module):
|
112 |
+
# Standard bottleneck
|
113 |
+
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
|
114 |
+
super().__init__()
|
115 |
+
c_ = int(c2 * e) # hidden channels
|
116 |
+
self.cv1 = Conv(c1, c_, 1, 1)
|
117 |
+
self.cv2 = Conv(c_, c2, 3, 1, g=g)
|
118 |
+
self.add = shortcut and c1 == c2
|
119 |
+
|
120 |
+
def forward(self, x):
|
121 |
+
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
|
122 |
+
|
123 |
+
|
124 |
+
class BottleneckCSP(nn.Module):
|
125 |
+
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
|
126 |
+
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
|
127 |
+
super().__init__()
|
128 |
+
c_ = int(c2 * e) # hidden channels
|
129 |
+
self.cv1 = Conv(c1, c_, 1, 1)
|
130 |
+
self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
|
131 |
+
self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
|
132 |
+
self.cv4 = Conv(2 * c_, c2, 1, 1)
|
133 |
+
self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
|
134 |
+
self.act = nn.SiLU()
|
135 |
+
self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
|
136 |
+
|
137 |
+
def forward(self, x):
|
138 |
+
y1 = self.cv3(self.m(self.cv1(x)))
|
139 |
+
y2 = self.cv2(x)
|
140 |
+
return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1))))
|
141 |
+
|
142 |
+
|
143 |
+
class CrossConv(nn.Module):
|
144 |
+
# Cross Convolution Downsample
|
145 |
+
def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
|
146 |
+
# ch_in, ch_out, kernel, stride, groups, expansion, shortcut
|
147 |
+
super().__init__()
|
148 |
+
c_ = int(c2 * e) # hidden channels
|
149 |
+
self.cv1 = Conv(c1, c_, (1, k), (1, s))
|
150 |
+
self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
|
151 |
+
self.add = shortcut and c1 == c2
|
152 |
+
|
153 |
+
def forward(self, x):
|
154 |
+
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
|
155 |
+
|
156 |
+
|
157 |
+
class C3(nn.Module):
|
158 |
+
# CSP Bottleneck with 3 convolutions
|
159 |
+
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
|
160 |
+
super().__init__()
|
161 |
+
c_ = int(c2 * e) # hidden channels
|
162 |
+
self.cv1 = Conv(c1, c_, 1, 1)
|
163 |
+
self.cv2 = Conv(c1, c_, 1, 1)
|
164 |
+
self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2)
|
165 |
+
self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
|
166 |
+
|
167 |
+
def forward(self, x):
|
168 |
+
return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))
|
169 |
+
|
170 |
+
|
171 |
+
class C3x(C3):
|
172 |
+
# C3 module with cross-convolutions
|
173 |
+
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
|
174 |
+
super().__init__(c1, c2, n, shortcut, g, e)
|
175 |
+
c_ = int(c2 * e)
|
176 |
+
self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)))
|
177 |
+
|
178 |
+
|
179 |
+
class C3TR(C3):
|
180 |
+
# C3 module with TransformerBlock()
|
181 |
+
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
|
182 |
+
super().__init__(c1, c2, n, shortcut, g, e)
|
183 |
+
c_ = int(c2 * e)
|
184 |
+
self.m = TransformerBlock(c_, c_, 4, n)
|
185 |
+
|
186 |
+
|
187 |
+
class C3SPP(C3):
|
188 |
+
# C3 module with SPP()
|
189 |
+
def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):
|
190 |
+
super().__init__(c1, c2, n, shortcut, g, e)
|
191 |
+
c_ = int(c2 * e)
|
192 |
+
self.m = SPP(c_, c_, k)
|
193 |
+
|
194 |
+
|
195 |
+
class C3Ghost(C3):
|
196 |
+
# C3 module with GhostBottleneck()
|
197 |
+
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
|
198 |
+
super().__init__(c1, c2, n, shortcut, g, e)
|
199 |
+
c_ = int(c2 * e) # hidden channels
|
200 |
+
self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))
|
201 |
+
|
202 |
+
|
203 |
+
class SPP(nn.Module):
|
204 |
+
# Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729
|
205 |
+
def __init__(self, c1, c2, k=(5, 9, 13)):
|
206 |
+
super().__init__()
|
207 |
+
c_ = c1 // 2 # hidden channels
|
208 |
+
self.cv1 = Conv(c1, c_, 1, 1)
|
209 |
+
self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
|
210 |
+
self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
|
211 |
+
|
212 |
+
def forward(self, x):
|
213 |
+
x = self.cv1(x)
|
214 |
+
with warnings.catch_warnings():
|
215 |
+
warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning
|
216 |
+
return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
|
217 |
+
|
218 |
+
|
219 |
+
class SPPF(nn.Module):
|
220 |
+
# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
|
221 |
+
def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13))
|
222 |
+
super().__init__()
|
223 |
+
c_ = c1 // 2 # hidden channels
|
224 |
+
self.cv1 = Conv(c1, c_, 1, 1)
|
225 |
+
self.cv2 = Conv(c_ * 4, c2, 1, 1)
|
226 |
+
self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
|
227 |
+
|
228 |
+
def forward(self, x):
|
229 |
+
x = self.cv1(x)
|
230 |
+
with warnings.catch_warnings():
|
231 |
+
warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning
|
232 |
+
y1 = self.m(x)
|
233 |
+
y2 = self.m(y1)
|
234 |
+
return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
|
235 |
+
|
236 |
+
|
237 |
+
class Focus(nn.Module):
|
238 |
+
# Focus wh information into c-space
|
239 |
+
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
|
240 |
+
super().__init__()
|
241 |
+
self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act)
|
242 |
+
# self.contract = Contract(gain=2)
|
243 |
+
|
244 |
+
def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
|
245 |
+
return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1))
|
246 |
+
# return self.conv(self.contract(x))
|
247 |
+
|
248 |
+
|
249 |
+
class GhostConv(nn.Module):
|
250 |
+
# Ghost Convolution https://github.com/huawei-noah/ghostnet
|
251 |
+
def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups
|
252 |
+
super().__init__()
|
253 |
+
c_ = c2 // 2 # hidden channels
|
254 |
+
self.cv1 = Conv(c1, c_, k, s, None, g, act=act)
|
255 |
+
self.cv2 = Conv(c_, c_, 5, 1, None, c_, act=act)
|
256 |
+
|
257 |
+
def forward(self, x):
|
258 |
+
y = self.cv1(x)
|
259 |
+
return torch.cat((y, self.cv2(y)), 1)
|
260 |
+
|
261 |
+
|
262 |
+
class GhostBottleneck(nn.Module):
|
263 |
+
# Ghost Bottleneck https://github.com/huawei-noah/ghostnet
|
264 |
+
def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride
|
265 |
+
super().__init__()
|
266 |
+
c_ = c2 // 2
|
267 |
+
self.conv = nn.Sequential(
|
268 |
+
GhostConv(c1, c_, 1, 1), # pw
|
269 |
+
DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
|
270 |
+
GhostConv(c_, c2, 1, 1, act=False)) # pw-linear
|
271 |
+
self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1,
|
272 |
+
act=False)) if s == 2 else nn.Identity()
|
273 |
+
|
274 |
+
def forward(self, x):
|
275 |
+
return self.conv(x) + self.shortcut(x)
|
276 |
+
|
277 |
+
|
278 |
+
class Contract(nn.Module):
|
279 |
+
# Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
|
280 |
+
def __init__(self, gain=2):
|
281 |
+
super().__init__()
|
282 |
+
self.gain = gain
|
283 |
+
|
284 |
+
def forward(self, x):
|
285 |
+
b, c, h, w = x.size() # assert (h / s == 0) and (W / s == 0), 'Indivisible gain'
|
286 |
+
s = self.gain
|
287 |
+
x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2)
|
288 |
+
x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)
|
289 |
+
return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40)
|
290 |
+
|
291 |
+
|
292 |
+
class Expand(nn.Module):
|
293 |
+
# Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
|
294 |
+
def __init__(self, gain=2):
|
295 |
+
super().__init__()
|
296 |
+
self.gain = gain
|
297 |
+
|
298 |
+
def forward(self, x):
|
299 |
+
b, c, h, w = x.size() # assert C / s ** 2 == 0, 'Indivisible gain'
|
300 |
+
s = self.gain
|
301 |
+
x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80)
|
302 |
+
x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)
|
303 |
+
return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160)
|
304 |
+
|
305 |
+
|
306 |
+
class Concat(nn.Module):
|
307 |
+
# Concatenate a list of tensors along dimension
|
308 |
+
def __init__(self, dimension=1):
|
309 |
+
super().__init__()
|
310 |
+
self.d = dimension
|
311 |
+
|
312 |
+
def forward(self, x):
|
313 |
+
return torch.cat(x, self.d)
|
314 |
+
|
315 |
+
|
316 |
+
+class DetectMultiBackend(nn.Module):
+    # YOLOv5 MultiBackend class for python inference on various backends
+    def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True, hf_token=None):
+        # Usage:
+        #   PyTorch:               weights = *.pt
+        #   TorchScript:                     *.torchscript
+        #   ONNX Runtime:                    *.onnx
+        #   ONNX OpenCV DNN:                 *.onnx --dnn
+        #   OpenVINO:                        *_openvino_model
+        #   CoreML:                          *.mlmodel
+        #   TensorRT:                        *.engine
+        #   TensorFlow SavedModel:           *_saved_model
+        #   TensorFlow GraphDef:             *.pb
+        #   TensorFlow Lite:                 *.tflite
+        #   TensorFlow Edge TPU:             *_edgetpu.tflite
+        #   PaddlePaddle:                    *_paddle_model
+        from infer.yolov5.models.experimental import attempt_download, attempt_load  # scoped to avoid circular import
+
+        super().__init__()
+        w = str(weights[0] if isinstance(weights, list) else weights)
+
+        # try to download from hf hub
+        result = attempt_download_from_hub(w, hf_token=hf_token)
+        if result is not None:
+            w = result
+
+        pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)
+        fp16 &= pt or jit or onnx or engine  # FP16
+        nhwc = coreml or saved_model or pb or tflite or edgetpu  # BHWC formats (vs torch BCHW)
+        stride = 32  # default stride
+        cuda = torch.cuda.is_available() and device.type != 'cpu'  # use CUDA
+        if not (pt or triton):
+            w = attempt_download(w)  # download if not local
+
+        if pt:  # PyTorch
+            model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)
+            stride = max(int(model.stride.max()), 32)  # model stride
+            names = model.module.names if hasattr(model, 'module') else model.names  # get class names
+            model.half() if fp16 else model.float()
+            self.model = model  # explicitly assign for to(), cpu(), cuda(), half()
+        elif jit:  # TorchScript
+            LOGGER.info(f'Loading {w} for TorchScript inference...')
+            extra_files = {'config.txt': ''}  # model metadata
+            model = torch.jit.load(w, _extra_files=extra_files, map_location=device)
+            model.half() if fp16 else model.float()
+            if extra_files['config.txt']:  # load metadata dict
+                d = json.loads(extra_files['config.txt'],
+                               object_hook=lambda d: {
+                                   int(k) if k.isdigit() else k: v
+                                   for k, v in d.items()})
+                stride, names = int(d['stride']), d['names']
+        elif dnn:  # ONNX OpenCV DNN
+            LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')
+            check_requirements('opencv-python>=4.5.4')
+            net = cv2.dnn.readNetFromONNX(w)
+        elif onnx:  # ONNX Runtime
+            LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
+            check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
+            import onnxruntime
+            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
+            session = onnxruntime.InferenceSession(w, providers=providers)
+            output_names = [x.name for x in session.get_outputs()]
+            meta = session.get_modelmeta().custom_metadata_map  # metadata
+            if 'stride' in meta:
+                stride, names = int(meta['stride']), eval(meta['names'])
+        elif xml:  # OpenVINO
+            LOGGER.info(f'Loading {w} for OpenVINO inference...')
+            check_requirements('openvino')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
+            from openvino.runtime import Core, Layout, get_batch
+            ie = Core()
+            if not Path(w).is_file():  # if not *.xml
+                w = next(Path(w).glob('*.xml'))  # get *.xml file from *_openvino_model dir
+            network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))
+            if network.get_parameters()[0].get_layout().empty:
+                network.get_parameters()[0].set_layout(Layout('NCHW'))
+            batch_dim = get_batch(network)
+            if batch_dim.is_static:
+                batch_size = batch_dim.get_length()
+            executable_network = ie.compile_model(network, device_name='CPU')  # device_name="MYRIAD" for Intel NCS2
+            stride, names = self._load_metadata(Path(w).with_suffix('.yaml'))  # load metadata
+        elif engine:  # TensorRT
+            LOGGER.info(f'Loading {w} for TensorRT inference...')
+            import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
+            check_version(trt.__version__, '7.0.0', hard=True)  # require tensorrt>=7.0.0
+            if device.type == 'cpu':
+                device = torch.device('cuda:0')
+            Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))
+            logger = trt.Logger(trt.Logger.INFO)
+            with open(w, 'rb') as f, trt.Runtime(logger) as runtime:
+                model = runtime.deserialize_cuda_engine(f.read())
+            context = model.create_execution_context()
+            bindings = OrderedDict()
+            output_names = []
+            fp16 = False  # default updated below
+            dynamic = False
+            for i in range(model.num_bindings):
+                name = model.get_binding_name(i)
+                dtype = trt.nptype(model.get_binding_dtype(i))
+                if model.binding_is_input(i):
+                    if -1 in tuple(model.get_binding_shape(i)):  # dynamic
+                        dynamic = True
+                        context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))
+                    if dtype == np.float16:
+                        fp16 = True
+                else:  # output
+                    output_names.append(name)
+                shape = tuple(context.get_binding_shape(i))
+                im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)
+                bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))
+            binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())
+            batch_size = bindings['images'].shape[0]  # if dynamic, this is instead max batch size
+        elif coreml:  # CoreML
+            LOGGER.info(f'Loading {w} for CoreML inference...')
+            import coremltools as ct
+            model = ct.models.MLModel(w)
+        elif saved_model:  # TF SavedModel
+            LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')
+            import tensorflow as tf
+            keras = False  # assume TF1 saved_model
+            model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)
+        elif pb:  # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
+            LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')
+            import tensorflow as tf
+
+            def wrap_frozen_graph(gd, inputs, outputs):
+                x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), [])  # wrapped
+                ge = x.graph.as_graph_element
+                return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))
+
+            def gd_outputs(gd):
+                name_list, input_list = [], []
+                for node in gd.node:  # tensorflow.core.framework.node_def_pb2.NodeDef
+                    name_list.append(node.name)
+                    input_list.extend(node.input)
+                return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp'))
+
+            gd = tf.Graph().as_graph_def()  # TF GraphDef
+            with open(w, 'rb') as f:
+                gd.ParseFromString(f.read())
+            frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd))
+        elif tflite or edgetpu:  # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
+            try:  # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
+                from tflite_runtime.interpreter import Interpreter, load_delegate
+            except ImportError:
+                import tensorflow as tf
+                Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate
+            if edgetpu:  # TF Edge TPU https://coral.ai/software/#edgetpu-runtime
+                LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')
+                delegate = {
+                    'Linux': 'libedgetpu.so.1',
+                    'Darwin': 'libedgetpu.1.dylib',
+                    'Windows': 'edgetpu.dll'}[platform.system()]
+                interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])
+            else:  # TFLite
+                LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
+                interpreter = Interpreter(model_path=w)  # load TFLite model
+            interpreter.allocate_tensors()  # allocate
+            input_details = interpreter.get_input_details()  # inputs
+            output_details = interpreter.get_output_details()  # outputs
+            # load metadata
+            with contextlib.suppress(zipfile.BadZipFile):
+                with zipfile.ZipFile(w, 'r') as model:
+                    meta_file = model.namelist()[0]
+                    meta = ast.literal_eval(model.read(meta_file).decode('utf-8'))
+                    stride, names = int(meta['stride']), meta['names']
+        elif tfjs:  # TF.js
+            raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')
+        elif paddle:  # PaddlePaddle
+            LOGGER.info(f'Loading {w} for PaddlePaddle inference...')
+            check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')
+            import paddle.inference as pdi
+            if not Path(w).is_file():  # if not *.pdmodel
+                w = next(Path(w).rglob('*.pdmodel'))  # get *.pdmodel file from *_paddle_model dir
+            weights = Path(w).with_suffix('.pdiparams')
+            config = pdi.Config(str(w), str(weights))
+            if cuda:
+                config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)
+            predictor = pdi.create_predictor(config)
+            input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
+            output_names = predictor.get_output_names()
+        elif triton:  # NVIDIA Triton Inference Server
+            LOGGER.info(f'Using {w} as Triton Inference Server...')
+            check_requirements('tritonclient[all]')
+            from infer.yolov5.utils.triton import TritonRemoteModel
+            model = TritonRemoteModel(url=w)
+            nhwc = model.runtime.startswith('tensorflow')
+        else:
+            raise NotImplementedError(f'ERROR: {w} is not a supported format')
+
+        # class names
+        if 'names' not in locals():
+            names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)}
+        if names[0] == 'n01440764' and len(names) == 1000:  # ImageNet
+            names = yaml_load(ROOT / 'data/ImageNet.yaml')['names']  # human-readable names
+
+        self.__dict__.update(locals())  # assign all variables to self
+
+    def forward(self, im, augment=False, visualize=False):
+        # YOLOv5 MultiBackend inference
+        b, ch, h, w = im.shape  # batch, channel, height, width
+        if self.fp16 and im.dtype != torch.float16:
+            im = im.half()  # to FP16
+        if self.nhwc:
+            im = im.permute(0, 2, 3, 1)  # torch BCHW to numpy BHWC shape(1,320,192,3)
+
+        if self.pt:  # PyTorch
+            y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)
+        elif self.jit:  # TorchScript
+            y = self.model(im)
+        elif self.dnn:  # ONNX OpenCV DNN
+            im = im.cpu().numpy()  # torch to numpy
+            self.net.setInput(im)
+            y = self.net.forward()
+        elif self.onnx:  # ONNX Runtime
+            im = im.cpu().numpy()  # torch to numpy
+            y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
+        elif self.xml:  # OpenVINO
+            im = im.cpu().numpy()  # FP32
+            y = list(self.executable_network([im]).values())
+        elif self.engine:  # TensorRT
+            if self.dynamic and im.shape != self.bindings['images'].shape:
+                i = self.model.get_binding_index('images')
+                self.context.set_binding_shape(i, im.shape)  # reshape if dynamic
+                self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)
+                for name in self.output_names:
+                    i = self.model.get_binding_index(name)
+                    self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))
+            s = self.bindings['images'].shape
+            assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}"
+            self.binding_addrs['images'] = int(im.data_ptr())
+            self.context.execute_v2(list(self.binding_addrs.values()))
+            y = [self.bindings[x].data for x in sorted(self.output_names)]
+        elif self.coreml:  # CoreML
+            im = im.cpu().numpy()
+            im = Image.fromarray((im[0] * 255).astype('uint8'))
+            # im = im.resize((192, 320), Image.ANTIALIAS)
+            y = self.model.predict({'image': im})  # coordinates are xywh normalized
+            if 'confidence' in y:
+                box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]])  # xyxy pixels
+                conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(float)
+                y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)
+            else:
+                y = list(reversed(y.values()))  # reversed for segmentation models (pred, proto)
+        elif self.paddle:  # PaddlePaddle
+            im = im.cpu().numpy().astype(np.float32)
+            self.input_handle.copy_from_cpu(im)
+            self.predictor.run()
+            y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]
+        elif self.triton:  # NVIDIA Triton Inference Server
+            y = self.model(im)
+        else:  # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
+            im = im.cpu().numpy()
+            if self.saved_model:  # SavedModel
+                y = self.model(im, training=False) if self.keras else self.model(im)
+            elif self.pb:  # GraphDef
+                y = self.frozen_func(x=self.tf.constant(im))
+            else:  # Lite or Edge TPU
+                input = self.input_details[0]
+                int8 = input['dtype'] == np.uint8  # is TFLite quantized uint8 model
+                if int8:
+                    scale, zero_point = input['quantization']
+                    im = (im / scale + zero_point).astype(np.uint8)  # de-scale
+                self.interpreter.set_tensor(input['index'], im)
+                self.interpreter.invoke()
+                y = []
+                for output in self.output_details:
+                    x = self.interpreter.get_tensor(output['index'])
+                    if int8:
+                        scale, zero_point = output['quantization']
+                        x = (x.astype(np.float32) - zero_point) * scale  # re-scale
+                    y.append(x)
+            y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]
+            y[0][..., :4] *= [w, h, w, h]  # xywh normalized to pixels
+
+        if isinstance(y, (list, tuple)):
+            return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]
+        else:
+            return self.from_numpy(y)
+
+    def from_numpy(self, x):
+        return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x
+
+    def warmup(self, imgsz=(1, 3, 640, 640)):
+        # Warmup model by running inference once
+        warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton
+        if any(warmup_types) and (self.device.type != 'cpu' or self.triton):
+            im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device)  # input
+            for _ in range(2 if self.jit else 1):
+                self.forward(im)  # warmup
+
+    @staticmethod
+    def _model_type(p='path/to/model.pt'):
+        # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx
+        # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]
+        from infer.yolov5.export import export_formats
+        from infer.yolov5.utils.downloads import is_url
+        sf = list(export_formats().Suffix)  # export suffixes
+        if not is_url(p, check=False):
+            check_suffix(p, sf)  # checks
+        url = urlparse(p)  # if url may be Triton inference server
+        types = [s in Path(p).name for s in sf]
+        types[8] &= not types[9]  # tflite &= not edgetpu
+        triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc])
+        return types + [triton]
+
+    @staticmethod
+    def _load_metadata(f=Path('path/to/meta.yaml')):
+        # Load metadata from meta.yaml if it exists
+        if f.exists():
+            d = yaml_load(f)
+            return d['stride'], d['names']  # assign stride, names
+        return None, None
+
+
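For orientation, this is roughly how the wrapper above is driven end to end; the weights path is a placeholder, and any suffix from the Usage table in __init__ selects the corresponding backend (a sketch, not part of the committed files):

import torch
from infer.yolov5.models.common import DetectMultiBackend

model = DetectMultiBackend('yolov5s.pt', device=torch.device('cpu'))  # hypothetical local checkpoint
model.warmup(imgsz=(1, 3, 640, 640))  # no-op on CPU unless the backend is Triton
im = torch.zeros(1, 3, 640, 640)      # BCHW float input in the 0-1 range
pred = model(im)                      # raw predictions; NMS is applied downstream
print(model.stride, len(model.names))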
+class AutoShape(nn.Module):
+    # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
+    conf = 0.25  # NMS confidence threshold
+    iou = 0.45  # NMS IoU threshold
+    agnostic = False  # NMS class-agnostic
+    multi_label = False  # NMS multiple labels per box
+    classes = None  # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs
+    max_det = 1000  # maximum number of detections per image
+    amp = False  # Automatic Mixed Precision (AMP) inference
+
+    def __init__(self, model, verbose=True):
+        super().__init__()
+        if verbose:
+            LOGGER.info('Adding AutoShape... ')
+        copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=())  # copy attributes
+        self.dmb = isinstance(model, DetectMultiBackend)  # DetectMultiBackend() instance
+        self.pt = not self.dmb or model.pt  # PyTorch model
+        self.model = model.eval()
+        if self.pt:
+            m = self.model.model.model[-1] if self.dmb else self.model.model[-1]  # Detect()
+            m.inplace = False  # Detect.inplace=False for safe multithread inference
+            m.export = True  # do not output loss values
+
+    def _apply(self, fn):
+        # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
+        self = super()._apply(fn)
+        if self.pt:
+            m = self.model.model.model[-1] if self.dmb else self.model.model[-1]  # Detect()
+            m.stride = fn(m.stride)
+            m.grid = list(map(fn, m.grid))
+            if isinstance(m.anchor_grid, list):
+                m.anchor_grid = list(map(fn, m.anchor_grid))
+        return self
+
+    @smart_inference_mode()
+    def forward(self, ims, size=640, augment=False, profile=False):
+        # Inference from various sources. For size(height=640, width=1280), RGB images example inputs are:
+        #   file:        ims = 'data/images/zidane.jpg'  # str or PosixPath
+        #   URI:             = 'https://ultralytics.com/images/zidane.jpg'
+        #   OpenCV:          = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(640,1280,3)
+        #   PIL:             = Image.open('image.jpg') or ImageGrab.grab()  # HWC x(640,1280,3)
+        #   numpy:           = np.zeros((640,1280,3))  # HWC
+        #   torch:           = torch.zeros(16,3,320,640)  # BCHW (scaled to size=640, 0-1 values)
+        #   multiple:        = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...]  # list of images
+
+        dt = (Profile(), Profile(), Profile())
+        with dt[0]:
+            if isinstance(size, int):  # expand
+                size = (size, size)
+            p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device)  # param
+            autocast = self.amp and (p.device.type != 'cpu')  # Automatic Mixed Precision (AMP) inference
+            if isinstance(ims, torch.Tensor):  # torch
+                with amp.autocast(autocast):
+                    return self.model(ims.to(p.device).type_as(p), augment=augment)  # inference
+
+            # Pre-process
+            n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims])  # number, list of images
+            shape0, shape1, files = [], [], []  # image and inference shapes, filenames
+            for i, im in enumerate(ims):
+                f = f'image{i}'  # filename
+                if isinstance(im, (str, Path)):  # filename or uri
+                    im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im
+                    im = np.asarray(exif_transpose(im))
+                elif isinstance(im, Image.Image):  # PIL Image
+                    im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f
+                files.append(Path(f).with_suffix('.jpg').name)
+                if im.shape[0] < 5:  # image in CHW
+                    im = im.transpose((1, 2, 0))  # reverse dataloader .transpose(2, 0, 1)
+                im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)  # enforce 3ch input
+                s = im.shape[:2]  # HWC
+                shape0.append(s)  # image shape
+                g = max(size) / max(s)  # gain
+                shape1.append([int(y * g) for y in s])
+                ims[i] = im if im.data.contiguous else np.ascontiguousarray(im)  # update
+            shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)]  # inf shape
+            x = [letterbox(im, shape1, auto=False)[0] for im in ims]  # pad
+            x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2)))  # stack and BHWC to BCHW
+            x = torch.from_numpy(x).to(p.device).type_as(p) / 255  # uint8 to fp16/32
+
+        with amp.autocast(autocast):
+            # Inference
+            with dt[1]:
+                y = self.model(x, augment=augment)  # forward
+
+            # Post-process
+            with dt[2]:
+                y = non_max_suppression(y if self.dmb else y[0],
+                                        self.conf,
+                                        self.iou,
+                                        self.classes,
+                                        self.agnostic,
+                                        self.multi_label,
+                                        max_det=self.max_det)  # NMS
+                for i in range(n):
+                    scale_boxes(shape1, y[i][:, :4], shape0[i])
+
+            return Detections(ims, y, files, dt, self.names, x.shape)
+
+
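A sketch of the typical call pattern for AutoShape (file names below are placeholders): it accepts paths, URLs, PIL/numpy images, or torch tensors, applies the letterbox preprocessing and NMS shown above, and returns a Detections object:

wrapped = AutoShape(model)              # model: e.g. a DetectMultiBackend instance
wrapped.conf, wrapped.iou = 0.25, 0.45  # NMS thresholds are plain attributes
results = wrapped(['image1.jpg', 'image2.jpg'], size=640)
results.print()                         # logs a per-image summary string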
+class Detections:
+    # YOLOv5 detections class for inference results
+    def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None):
+        super().__init__()
+        d = pred[0].device  # device
+        gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in ims]  # normalizations
+        self.ims = ims  # list of images as numpy arrays
+        self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
+        self.names = names  # class names
+        self.files = files  # image filenames
+        self.times = times  # profiling times
+        self.xyxy = pred  # xyxy pixels
+        self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
+        self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
+        self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized
+        self.n = len(self.pred)  # number of images (batch size)
+        self.t = tuple(x.t / self.n * 1E3 for x in times)  # timestamps (ms)
+        self.s = tuple(shape)  # inference BCHW shape
+
+    def _run(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')):
+        s, crops = '', []
+        for i, (im, pred) in enumerate(zip(self.ims, self.pred)):
+            s += f'\nimage {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} '  # string
+            if pred.shape[0]:
+                for c in pred[:, -1].unique():
+                    n = (pred[:, -1] == c).sum()  # detections per class
+                    s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "  # add to string
+                s = s.rstrip(', ')
+                if show or save or render or crop:
+                    annotator = Annotator(im, example=str(self.names))
+                    for *box, conf, cls in reversed(pred):  # xyxy, confidence, class
+                        label = f'{self.names[int(cls)]} {conf:.2f}'
+                        if crop:
+                            file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None
+                            crops.append({
+                                'box': box,
+                                'conf': conf,
+                                'cls': cls,
+                                'label': label,
+                                'im': save_one_box(box, im, file=file, save=save)})
+                        else:  # all others
+                            annotator.box_label(box, label if labels else '', color=colors(cls))
+                    im = annotator.im
+            else:
+                s += '(no detections)'
+
+            im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im  # from np
+            if show:
+                if is_jupyter():
+                    from IPython.display import display
+                    display(im)
+                else:
+                    im.show(self.files[i])
+            if save:
+                f = self.files[i]
+                im.save(save_dir / f)  # save
+                if i == self.n - 1:
+                    LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}")
+            if render:
+                self.ims[i] = np.asarray(im)
+        if pprint:
+            s = s.lstrip('\n')
+            return f'{s}\nSpeed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t
+        if crop:
+            if save:
+                LOGGER.info(f'Saved results to {save_dir}\n')
+            return crops
+
+    @TryExcept('Showing images is not supported in this environment')
+    def show(self, labels=True):
+        self._run(show=True, labels=labels)  # show results
+
+    def save(self, labels=True, save_dir='runs/detect/exp', exist_ok=False):
+        save_dir = increment_path(save_dir, exist_ok, mkdir=True)  # increment save_dir
+        self._run(save=True, labels=labels, save_dir=save_dir)  # save results
+
+    def crop(self, save=True, save_dir='runs/detect/exp', exist_ok=False):
+        save_dir = increment_path(save_dir, exist_ok, mkdir=True) if save else None
+        return self._run(crop=True, save=save, save_dir=save_dir)  # crop results
+
+    def render(self, labels=True):
+        self._run(render=True, labels=labels)  # render results
+        return self.ims
+
+    def pandas(self):
+        # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
+        new = copy(self)  # return copy
+        ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name'  # xyxy columns
+        cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name'  # xywh columns
+        for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
+            a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)]  # update
+            setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
+        return new
+
+    def tolist(self):
+        # return a list of Detections objects, i.e. 'for result in results.tolist():'
+        r = range(self.n)  # iterable
+        x = [Detections([self.ims[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r]
+        # for d in x:
+        #    for k in ['ims', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
+        #        setattr(d, k, getattr(d, k)[0])  # pop out of list
+        return x
+
+    def print(self):
+        LOGGER.info(self.__str__())
+
+    def __len__(self):  # override len(results)
+        return self.n
+
+    def __str__(self):  # override print(results)
+        return self._run(pprint=True)  # print results
+
+    def __repr__(self):
+        return f'YOLOv5 {self.__class__} instance\n' + self.__str__()
+
+
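Continuing the sketch above, the Detections accessors expose the same boxes in several coordinate systems; pandas() is handy for filtering (column names follow ca/cb defined in pandas()):

df = results.pandas().xyxy[0]      # DataFrame: xmin, ymin, xmax, ymax, confidence, class, name
confident = df[df.confidence > 0.5]
crops = results.crop(save=False)   # list of dicts with 'box', 'conf', 'cls', 'label', 'im'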
+class Proto(nn.Module):
+    # YOLOv5 mask Proto module for segmentation models
+    def __init__(self, c1, c_=256, c2=32):  # ch_in, number of protos, number of masks
+        super().__init__()
+        self.cv1 = Conv(c1, c_, k=3)
+        self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
+        self.cv2 = Conv(c_, c_, k=3)
+        self.cv3 = Conv(c_, c2)
+
+    def forward(self, x):
+        return self.cv3(self.cv2(self.upsample(self.cv1(x))))
+
+
+class Classify(nn.Module):
+    # YOLOv5 classification head, i.e. x(b,c1,20,20) to x(b,c2)
+    def __init__(self,
+                 c1,
+                 c2,
+                 k=1,
+                 s=1,
+                 p=None,
+                 g=1,
+                 dropout_p=0.0):  # ch_in, ch_out, kernel, stride, padding, groups, dropout probability
+        super().__init__()
+        c_ = 1280  # efficientnet_b0 size
+        self.conv = Conv(c1, c_, k, s, autopad(k, p), g)
+        self.pool = nn.AdaptiveAvgPool2d(1)  # to x(b,c_,1,1)
+        self.drop = nn.Dropout(p=dropout_p, inplace=True)
+        self.linear = nn.Linear(c_, c2)  # to x(b,c2)
+
+    def forward(self, x):
+        if isinstance(x, list):
+            x = torch.cat(x, 1)
+        return self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
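A minimal shape check for the classification head (a sketch, assuming Conv and autopad from earlier in this file): a (b, c1, 20, 20) feature map is projected to 1280 channels, pooled, and mapped to c2 logits:

head = Classify(c1=512, c2=10)  # hypothetical 10-class head on 512-channel features
out = head(torch.zeros(2, 512, 20, 20))
assert out.shape == (2, 10)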
infer/yolov5/models/experimental.py
ADDED
@@ -0,0 +1,111 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Experimental modules
+"""
+import math
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+from infer.yolov5.utils.downloads import attempt_download
+
+
+class Sum(nn.Module):
+    # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
+    def __init__(self, n, weight=False):  # n: number of inputs
+        super().__init__()
+        self.weight = weight  # apply weights boolean
+        self.iter = range(n - 1)  # iter object
+        if weight:
+            self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True)  # layer weights
+
+    def forward(self, x):
+        y = x[0]  # no weight
+        if self.weight:
+            w = torch.sigmoid(self.w) * 2
+            for i in self.iter:
+                y = y + x[i + 1] * w[i]
+        else:
+            for i in self.iter:
+                y = y + x[i + 1]
+        return y
+
+
+class MixConv2d(nn.Module):
+    # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595
+    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):  # ch_in, ch_out, kernel, stride, ch_strategy
+        super().__init__()
+        n = len(k)  # number of convolutions
+        if equal_ch:  # equal c_ per group
+            i = torch.linspace(0, n - 1E-6, c2).floor()  # c2 indices
+            c_ = [(i == g).sum() for g in range(n)]  # intermediate channels
+        else:  # equal weight.numel() per group
+            b = [c2] + [0] * n
+            a = np.eye(n + 1, n, k=-1)
+            a -= np.roll(a, 1, axis=1)
+            a *= np.array(k) ** 2
+            a[0] = 1
+            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b
+
+        self.m = nn.ModuleList([
+            nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)])
+        self.bn = nn.BatchNorm2d(c2)
+        self.act = nn.SiLU()
+
+    def forward(self, x):
+        return self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
+
+
+class Ensemble(nn.ModuleList):
+    # Ensemble of models
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x, augment=False, profile=False, visualize=False):
+        y = [module(x, augment, profile, visualize)[0] for module in self]
+        # y = torch.stack(y).max(0)[0]  # max ensemble
+        # y = torch.stack(y).mean(0)  # mean ensemble
+        y = torch.cat(y, 1)  # nms ensemble
+        return y, None  # inference, train output
+
+
+def attempt_load(weights, device=None, inplace=True, fuse=True):
+    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
+    from infer.yolov5.models.yolo import Detect, Model
+
+    model = Ensemble()
+    for w in weights if isinstance(weights, list) else [weights]:
+        ckpt = torch.load(attempt_download(w), map_location='cpu')  # load
+        ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float()  # FP32 model
+
+        # Model compatibility updates
+        if not hasattr(ckpt, 'stride'):
+            ckpt.stride = torch.tensor([32.])
+        if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)):
+            ckpt.names = dict(enumerate(ckpt.names))  # convert to dict
+
+        model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval())  # model in eval mode
+
+    # Module compatibility updates
+    for m in model.modules():
+        t = type(m)
+        if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):
+            m.inplace = inplace  # torch 1.7.0 compatibility
+            if t is Detect and not isinstance(m.anchor_grid, list):
+                delattr(m, 'anchor_grid')
+                setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)
+        elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):
+            m.recompute_scale_factor = None  # torch 1.11.0 compatibility
+
+    # Return model
+    if len(model) == 1:
+        return model[-1]
+
+    # Return detection ensemble
+    print(f'Ensemble created with {weights}\n')
+    for k in 'names', 'nc', 'yaml':
+        setattr(model, k, getattr(model[0], k))
+    model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride  # max stride
+    assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'
+    return model
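As a usage note (checkpoint names below are placeholders): attempt_load returns a single Model for one weights file, and an Ensemble, whose forward concatenates per-model predictions for joint NMS, when given a list:

model = attempt_load('yolov5s.pt', device=torch.device('cpu'), fuse=True)
ens = attempt_load(['yolov5s.pt', 'yolov5m.pt'], device=torch.device('cpu'))
pred = ens(torch.zeros(1, 3, 640, 640))[0]  # concatenated detections from both models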
infer/yolov5/models/hub/anchors.yaml
ADDED
@@ -0,0 +1,59 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+# Default anchors for COCO data
+
+
+# P5 -------------------------------------------------------------------------------------------------------------------
+# P5-640:
+anchors_p5_640:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+
+# P6 -------------------------------------------------------------------------------------------------------------------
+# P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387
+anchors_p6_640:
+  - [9,11, 21,19, 17,41]  # P3/8
+  - [43,32, 39,70, 86,64]  # P4/16
+  - [65,131, 134,130, 120,265]  # P5/32
+  - [282,180, 247,354, 512,387]  # P6/64
+
+# P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792
+anchors_p6_1280:
+  - [19,27, 44,40, 38,94]  # P3/8
+  - [96,68, 86,152, 180,137]  # P4/16
+  - [140,301, 303,264, 238,542]  # P5/32
+  - [436,615, 739,380, 925,792]  # P6/64
+
+# P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187
+anchors_p6_1920:
+  - [28,41, 67,59, 57,141]  # P3/8
+  - [144,103, 129,227, 270,205]  # P4/16
+  - [209,452, 455,396, 358,812]  # P5/32
+  - [653,922, 1109,570, 1387,1187]  # P6/64
+
+
+# P7 -------------------------------------------------------------------------------------------------------------------
+# P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372
+anchors_p7_640:
+  - [11,11, 13,30, 29,20]  # P3/8
+  - [30,46, 61,38, 39,92]  # P4/16
+  - [78,80, 146,66, 79,163]  # P5/32
+  - [149,150, 321,143, 157,303]  # P6/64
+  - [257,402, 359,290, 524,372]  # P7/128
+
+# P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818
+anchors_p7_1280:
+  - [19,22, 54,36, 32,77]  # P3/8
+  - [70,83, 138,71, 75,173]  # P4/16
+  - [165,159, 148,334, 375,151]  # P5/32
+  - [334,317, 251,626, 499,474]  # P6/64
+  - [750,326, 534,814, 1079,818]  # P7/128
+
+# P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227
+anchors_p7_1920:
+  - [29,34, 81,55, 47,115]  # P3/8
+  - [105,124, 207,107, 113,259]  # P4/16
+  - [247,238, 222,500, 563,227]  # P5/32
+  - [501,476, 376,939, 749,711]  # P6/64
+  - [1126,489, 801,1222, 1618,1227]  # P7/128
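A small sanity check on the structure of these tables (a sketch; each YAML list entry is one output layer holding three w,h anchor pairs flattened into six numbers):

import yaml

d = yaml.safe_load(open('infer/yolov5/models/hub/anchors.yaml'))
p6 = d['anchors_p6_640']
assert len(p6) == 4                          # P3, P4, P5, P6 output layers
assert all(len(level) == 6 for level in p6)  # 3 anchors x (w, h) per layer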
infer/yolov5/models/hub/yolov3-spp.yaml
ADDED
@@ -0,0 +1,51 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# darknet53 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [32, 3, 1]],  # 0
+   [-1, 1, Conv, [64, 3, 2]],  # 1-P1/2
+   [-1, 1, Bottleneck, [64]],
+   [-1, 1, Conv, [128, 3, 2]],  # 3-P2/4
+   [-1, 2, Bottleneck, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 5-P3/8
+   [-1, 8, Bottleneck, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 7-P4/16
+   [-1, 8, Bottleneck, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 9-P5/32
+   [-1, 4, Bottleneck, [1024]],  # 10
+  ]
+
+# YOLOv3-SPP head
+head:
+  [[-1, 1, Bottleneck, [1024, False]],
+   [-1, 1, SPP, [512, [5, 9, 13]]],
+   [-1, 1, Conv, [1024, 3, 1]],
+   [-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, Conv, [1024, 3, 1]],  # 15 (P5/32-large)
+
+   [-2, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 8], 1, Concat, [1]],  # cat backbone P4
+   [-1, 1, Bottleneck, [512, False]],
+   [-1, 1, Bottleneck, [512, False]],
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, Conv, [512, 3, 1]],  # 22 (P4/16-medium)
+
+   [-2, 1, Conv, [128, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P3
+   [-1, 1, Bottleneck, [256, False]],
+   [-1, 2, Bottleneck, [256, False]],  # 27 (P3/8-small)
+
+   [[27, 22, 15], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]
infer/yolov5/models/hub/yolov3-tiny.yaml
ADDED
@@ -0,0 +1,41 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+anchors:
+  - [10,14, 23,27, 37,58]  # P4/16
+  - [81,82, 135,169, 344,319]  # P5/32
+
+# YOLOv3-tiny backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [16, 3, 1]],  # 0
+   [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 1-P1/2
+   [-1, 1, Conv, [32, 3, 1]],
+   [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 3-P2/4
+   [-1, 1, Conv, [64, 3, 1]],
+   [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 5-P3/8
+   [-1, 1, Conv, [128, 3, 1]],
+   [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 7-P4/16
+   [-1, 1, Conv, [256, 3, 1]],
+   [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 9-P5/32
+   [-1, 1, Conv, [512, 3, 1]],
+   [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]],  # 11
+   [-1, 1, nn.MaxPool2d, [2, 1, 0]],  # 12
+  ]
+
+# YOLOv3-tiny head
+head:
+  [[-1, 1, Conv, [1024, 3, 1]],
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, Conv, [512, 3, 1]],  # 15 (P5/32-large)
+
+   [-2, 1, Conv, [128, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 8], 1, Concat, [1]],  # cat backbone P4
+   [-1, 1, Conv, [256, 3, 1]],  # 19 (P4/16-medium)
+
+   [[19, 15], 1, Detect, [nc, anchors]],  # Detect(P4, P5)
+  ]
infer/yolov5/models/hub/yolov3.yaml
ADDED
@@ -0,0 +1,51 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# darknet53 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [32, 3, 1]],  # 0
+   [-1, 1, Conv, [64, 3, 2]],  # 1-P1/2
+   [-1, 1, Bottleneck, [64]],
+   [-1, 1, Conv, [128, 3, 2]],  # 3-P2/4
+   [-1, 2, Bottleneck, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 5-P3/8
+   [-1, 8, Bottleneck, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 7-P4/16
+   [-1, 8, Bottleneck, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 9-P5/32
+   [-1, 4, Bottleneck, [1024]],  # 10
+  ]
+
+# YOLOv3 head
+head:
+  [[-1, 1, Bottleneck, [1024, False]],
+   [-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, Conv, [1024, 3, 1]],
+   [-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, Conv, [1024, 3, 1]],  # 15 (P5/32-large)
+
+   [-2, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 8], 1, Concat, [1]],  # cat backbone P4
+   [-1, 1, Bottleneck, [512, False]],
+   [-1, 1, Bottleneck, [512, False]],
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, Conv, [512, 3, 1]],  # 22 (P4/16-medium)
+
+   [-2, 1, Conv, [128, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P3
+   [-1, 1, Bottleneck, [256, False]],
+   [-1, 2, Bottleneck, [256, False]],  # 27 (P3/8-small)
+
+   [[27, 22, 15], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]
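Any of these hub configs can be instantiated directly through the Model class (a sketch; Model parses backbone/head lists like the ones above into an nn.Module graph):

from infer.yolov5.models.yolo import Model

m = Model('infer/yolov5/models/hub/yolov3.yaml', ch=3, nc=80)  # 3-channel input, 80 classes
print(sum(p.numel() for p in m.parameters()))  # parameter count of the built network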
infer/yolov5/models/hub/yolov5-bifpn.yaml
ADDED
@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 BiFPN head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14, 6], 1, Concat, [1]],  # cat P4 <--- BiFPN change
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]
infer/yolov5/models/hub/yolov5-fpn.yaml
ADDED
@@ -0,0 +1,42 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 FPN head
+head:
+  [[-1, 3, C3, [1024, False]],  # 10 (P5/32-large)
+
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 1, Conv, [512, 1, 1]],
+   [-1, 3, C3, [512, False]],  # 14 (P4/16-medium)
+
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 3, C3, [256, False]],  # 18 (P3/8-small)
+
+   [[18, 14, 10], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]
infer/yolov5/models/hub/yolov5-p2.yaml
ADDED
@@ -0,0 +1,54 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+anchors: 3  # AutoAnchor evolves 3 anchors per P output layer
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 head with (P2, P3, P4, P5) outputs
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [128, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 2], 1, Concat, [1]],  # cat backbone P2
+   [-1, 1, C3, [128, False]],  # 21 (P2/4-xsmall)
+
+   [-1, 1, Conv, [128, 3, 2]],
+   [[-1, 18], 1, Concat, [1]],  # cat head P3
+   [-1, 3, C3, [256, False]],  # 24 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 27 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 30 (P5/32-large)
+
+   [[21, 24, 27, 30], 1, Detect, [nc, anchors]],  # Detect(P2, P3, P4, P5)
+  ]
infer/yolov5/models/hub/yolov5-p34.yaml
ADDED
@@ -0,0 +1,41 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 0.33  # model depth multiple
+width_multiple: 0.50  # layer channel multiple
+anchors: 3  # AutoAnchor evolves 3 anchors per P output layer
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [ [ -1, 1, Conv, [ 64, 6, 2, 2 ] ],  # 0-P1/2
+    [ -1, 1, Conv, [ 128, 3, 2 ] ],  # 1-P2/4
+    [ -1, 3, C3, [ 128 ] ],
+    [ -1, 1, Conv, [ 256, 3, 2 ] ],  # 3-P3/8
+    [ -1, 6, C3, [ 256 ] ],
+    [ -1, 1, Conv, [ 512, 3, 2 ] ],  # 5-P4/16
+    [ -1, 9, C3, [ 512 ] ],
+    [ -1, 1, Conv, [ 1024, 3, 2 ] ],  # 7-P5/32
+    [ -1, 3, C3, [ 1024 ] ],
+    [ -1, 1, SPPF, [ 1024, 5 ] ],  # 9
+  ]
+
+# YOLOv5 v6.0 head with (P3, P4) outputs
+head:
+  [ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+    [ [ -1, 6 ], 1, Concat, [ 1 ] ],  # cat backbone P4
+    [ -1, 3, C3, [ 512, False ] ],  # 13
+
+    [ -1, 1, Conv, [ 256, 1, 1 ] ],
+    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+    [ [ -1, 4 ], 1, Concat, [ 1 ] ],  # cat backbone P3
+    [ -1, 3, C3, [ 256, False ] ],  # 17 (P3/8-small)
+
+    [ -1, 1, Conv, [ 256, 3, 2 ] ],
+    [ [ -1, 14 ], 1, Concat, [ 1 ] ],  # cat head P4
+    [ -1, 3, C3, [ 512, False ] ],  # 20 (P4/16-medium)
+
+    [ [ 17, 20 ], 1, Detect, [ nc, anchors ] ],  # Detect(P3, P4)
+  ]
infer/yolov5/models/hub/yolov5-p6.yaml
ADDED
@@ -0,0 +1,56 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+anchors: 3  # AutoAnchor evolves 3 anchors per P output layer
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [768]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 11
+  ]
+
+# YOLOv5 v6.0 head with (P3, P4, P5, P6) outputs
+head:
+  [[-1, 1, Conv, [768, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 8], 1, Concat, [1]],  # cat backbone P5
+   [-1, 3, C3, [768, False]],  # 15
+
+   [-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 19
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 23 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 20], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 16], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [768, False]],  # 29 (P5/32-large)
+
+   [-1, 1, Conv, [768, 3, 2]],
+   [[-1, 12], 1, Concat, [1]],  # cat head P6
+   [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)
+
+   [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)
+  ]
infer/yolov5/models/hub/yolov5-p7.yaml
ADDED
@@ -0,0 +1,67 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+anchors: 3  # AutoAnchor evolves 3 anchors per P output layer
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [768]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
+   [-1, 3, C3, [1024]],
+   [-1, 1, Conv, [1280, 3, 2]],  # 11-P7/128
+   [-1, 3, C3, [1280]],
+   [-1, 1, SPPF, [1280, 5]],  # 13
+  ]
+
+# YOLOv5 v6.0 head with (P3, P4, P5, P6, P7) outputs
+head:
+  [[-1, 1, Conv, [1024, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 10], 1, Concat, [1]],  # cat backbone P6
+   [-1, 3, C3, [1024, False]],  # 17
+
+   [-1, 1, Conv, [768, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 8], 1, Concat, [1]],  # cat backbone P5
+   [-1, 3, C3, [768, False]],  # 21
+
+   [-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 25
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 29 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 26], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 32 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 22], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [768, False]],  # 35 (P5/32-large)
+
+   [-1, 1, Conv, [768, 3, 2]],
+   [[-1, 18], 1, Concat, [1]],  # cat head P6
+   [-1, 3, C3, [1024, False]],  # 38 (P6/64-xlarge)
+
+   [-1, 1, Conv, [1024, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P7
+   [-1, 3, C3, [1280, False]],  # 41 (P7/128-xxlarge)
+
+   [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6, P7)
+  ]
infer/yolov5/models/hub/yolov5-panet.yaml
ADDED
@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 PANet head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]
infer/yolov5/models/hub/yolov5l6.yaml
ADDED
@@ -0,0 +1,60 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 80  # number of classes
depth_multiple: 1.0  # model depth multiple
width_multiple: 1.0  # layer channel multiple
anchors:
  - [19,27, 44,40, 38,94]  # P3/8
  - [96,68, 86,152, 180,137]  # P4/16
  - [140,301, 303,264, 238,542]  # P5/32
  - [436,615, 739,380, 925,792]  # P6/64

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
   [-1, 3, C3, [768]],
   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 11
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [768, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 8], 1, Concat, [1]],  # cat backbone P5
   [-1, 3, C3, [768, False]],  # 15

   [-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 19

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 23 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 20], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 16], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [768, False]],  # 29 (P5/32-large)

   [-1, 1, Conv, [768, 3, 2]],
   [[-1, 12], 1, Concat, [1]],  # cat head P6
   [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)

   [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)
  ]
infer/yolov5/models/hub/yolov5m6.yaml
ADDED
@@ -0,0 +1,60 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 80  # number of classes
depth_multiple: 0.67  # model depth multiple
width_multiple: 0.75  # layer channel multiple
anchors:
  - [19,27, 44,40, 38,94]  # P3/8
  - [96,68, 86,152, 180,137]  # P4/16
  - [140,301, 303,264, 238,542]  # P5/32
  - [436,615, 739,380, 925,792]  # P6/64

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
   [-1, 3, C3, [768]],
   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 11
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [768, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 8], 1, Concat, [1]],  # cat backbone P5
   [-1, 3, C3, [768, False]],  # 15

   [-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 19

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 23 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 20], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 16], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [768, False]],  # 29 (P5/32-large)

   [-1, 1, Conv, [768, 3, 2]],
   [[-1, 12], 1, Concat, [1]],  # cat head P6
   [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)

   [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)
  ]
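Worth noting between these near-identical P6 variants: only depth_multiple and width_multiple change, and parse_model (in models/tf.py further down in this commit) turns them into repeat counts and channel widths. A minimal sketch of that rule, using yolov5m6's multipliers:

# mirrors the depth/width gain lines in parse_model (models/tf.py below)
import math

def make_divisible(x, divisor=8):
    return math.ceil(x / divisor) * divisor

gd, gw = 0.67, 0.75  # yolov5m6: depth_multiple, width_multiple
n, c2 = 9, 512       # e.g. the "[-1, 9, C3, [512]]" backbone entry
n = max(round(n * gd), 1) if n > 1 else n  # depth gain -> 6 repeats
c2 = make_divisible(c2 * gw, 8)            # width gain -> 384 channels
print(n, c2)  # 6 384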
infer/yolov5/models/hub/yolov5n6.yaml
ADDED
@@ -0,0 +1,60 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 80  # number of classes
depth_multiple: 0.33  # model depth multiple
width_multiple: 0.25  # layer channel multiple
anchors:
  - [19,27, 44,40, 38,94]  # P3/8
  - [96,68, 86,152, 180,137]  # P4/16
  - [140,301, 303,264, 238,542]  # P5/32
  - [436,615, 739,380, 925,792]  # P6/64

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
   [-1, 3, C3, [768]],
   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 11
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [768, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 8], 1, Concat, [1]],  # cat backbone P5
   [-1, 3, C3, [768, False]],  # 15

   [-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 19

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 23 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 20], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 16], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [768, False]],  # 29 (P5/32-large)

   [-1, 1, Conv, [768, 3, 2]],
   [[-1, 12], 1, Concat, [1]],  # cat head P6
   [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)

   [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)
  ]
infer/yolov5/models/hub/yolov5s-LeakyReLU.yaml
ADDED
@@ -0,0 +1,49 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 80  # number of classes
activation: nn.LeakyReLU(0.1)  # <----- Conv() activation used throughout entire YOLOv5 model
depth_multiple: 0.33  # model depth multiple
width_multiple: 0.50  # layer channel multiple
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 9
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)

   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]
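The activation: key above swaps the default SiLU for LeakyReLU(0.1) throughout the model; for TF export, the activations() helper in models/tf.py below maps it to the Keras equivalent. A minimal sketch of that mapping:

# the branch activations() takes for nn.LeakyReLU (see models/tf.py below)
import tensorflow as tf
from tensorflow import keras

tf_act = lambda x: keras.activations.relu(x, alpha=0.1)  # nn.LeakyReLU(0.1)
print(tf_act(tf.constant([-1.0, 2.0])).numpy())  # [-0.1  2. ]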
infer/yolov5/models/hub/yolov5s-ghost.yaml
ADDED
@@ -0,0 +1,48 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 80  # number of classes
depth_multiple: 0.33  # model depth multiple
width_multiple: 0.50  # layer channel multiple
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, GhostConv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3Ghost, [128]],
   [-1, 1, GhostConv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3Ghost, [256]],
   [-1, 1, GhostConv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3Ghost, [512]],
   [-1, 1, GhostConv, [1024, 3, 2]],  # 7-P5/32
   [-1, 3, C3Ghost, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 9
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, GhostConv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3Ghost, [512, False]],  # 13

   [-1, 1, GhostConv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3Ghost, [256, False]],  # 17 (P3/8-small)

   [-1, 1, GhostConv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3Ghost, [512, False]],  # 20 (P4/16-medium)

   [-1, 1, GhostConv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3Ghost, [1024, False]],  # 23 (P5/32-large)

   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]
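Background, hedged: GhostConv/C3Ghost follow the GhostNet idea of computing only half the output channels with a dense convolution and deriving the rest with a cheap depthwise convolution; their actual definitions live in models/common.py (not shown in this part of the diff). A rough PyTorch sketch of the idea, with hypothetical names:

import torch
import torch.nn as nn

class GhostConvSketch(nn.Module):
    # rough sketch only: half the channels from a dense conv,
    # the other half from a cheap depthwise conv over those features
    def __init__(self, c1, c2, k=1, s=1):
        super().__init__()
        c_ = c2 // 2
        self.cv1 = nn.Conv2d(c1, c_, k, s, k // 2)
        self.cv2 = nn.Conv2d(c_, c_, 5, 1, 2, groups=c_)  # depthwise 5x5

    def forward(self, x):
        y = self.cv1(x)
        return torch.cat([y, self.cv2(y)], 1)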
infer/yolov5/models/hub/yolov5s-transformer.yaml
ADDED
@@ -0,0 +1,48 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 80  # number of classes
depth_multiple: 0.33  # model depth multiple
width_multiple: 0.50  # layer channel multiple
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 3, C3TR, [1024]],  # 9 <--- C3TR() Transformer module
   [-1, 1, SPPF, [1024, 5]],  # 9
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)

   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]
infer/yolov5/models/hub/yolov5s6.yaml
ADDED
@@ -0,0 +1,60 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 80  # number of classes
depth_multiple: 0.33  # model depth multiple
width_multiple: 0.50  # layer channel multiple
anchors:
  - [19,27, 44,40, 38,94]  # P3/8
  - [96,68, 86,152, 180,137]  # P4/16
  - [140,301, 303,264, 238,542]  # P5/32
  - [436,615, 739,380, 925,792]  # P6/64

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
   [-1, 3, C3, [768]],
   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 11
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [768, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 8], 1, Concat, [1]],  # cat backbone P5
   [-1, 3, C3, [768, False]],  # 15

   [-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 19

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 23 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 20], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 16], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [768, False]],  # 29 (P5/32-large)

   [-1, 1, Conv, [768, 3, 2]],
   [[-1, 12], 1, Concat, [1]],  # cat head P6
   [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)

   [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)
  ]
infer/yolov5/models/hub/yolov5x6.yaml
ADDED
@@ -0,0 +1,60 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 80  # number of classes
depth_multiple: 1.33  # model depth multiple
width_multiple: 1.25  # layer channel multiple
anchors:
  - [19,27, 44,40, 38,94]  # P3/8
  - [96,68, 86,152, 180,137]  # P4/16
  - [140,301, 303,264, 238,542]  # P5/32
  - [436,615, 739,380, 925,792]  # P6/64

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
   [-1, 3, C3, [768]],
   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 11
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [768, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 8], 1, Concat, [1]],  # cat backbone P5
   [-1, 3, C3, [768, False]],  # 15

   [-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 19

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 23 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 20], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 16], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [768, False]],  # 29 (P5/32-large)

   [-1, 1, Conv, [768, 3, 2]],
   [[-1, 12], 1, Concat, [1]],  # cat head P6
   [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)

   [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)
  ]
infer/yolov5/models/segment/yolov5l-seg.yaml
ADDED
@@ -0,0 +1,48 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 80  # number of classes
depth_multiple: 1.0  # model depth multiple
width_multiple: 1.0  # layer channel multiple
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 9
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)

   [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]],  # Detect(P3, P4, P5)
  ]
infer/yolov5/models/segment/yolov5m-seg.yaml
ADDED
@@ -0,0 +1,48 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 80  # number of classes
depth_multiple: 0.67  # model depth multiple
width_multiple: 0.75  # layer channel multiple
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 9
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)

   [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]],  # Detect(P3, P4, P5)
  ]
infer/yolov5/models/segment/yolov5n-seg.yaml
ADDED
@@ -0,0 +1,48 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 80  # number of classes
depth_multiple: 0.33  # model depth multiple
width_multiple: 0.25  # layer channel multiple
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 9
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)

   [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]],  # Detect(P3, P4, P5)
  ]
infer/yolov5/models/segment/yolov5s-seg.yaml
ADDED
@@ -0,0 +1,48 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 80  # number of classes
depth_multiple: 0.33  # model depth multiple
width_multiple: 0.5  # layer channel multiple
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 9
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)

   [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]],  # Detect(P3, P4, P5)
  ]
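In the Segment args above, 32 and 256 are the mask-coefficient count (nm) and prototype channels (npr); per TFSegment in models/tf.py below, each anchor then predicts 5 + nc + nm values. A quick check:

nc, nm = 80, 32   # classes, masks (the "32" in the Segment args)
no = 5 + nc + nm  # box(4) + objectness(1) + class scores + mask coefficients
print(no)  # 117 outputs per anchor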
infer/yolov5/models/segment/yolov5x-seg.yaml
ADDED
@@ -0,0 +1,48 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 80  # number of classes
depth_multiple: 1.33  # model depth multiple
width_multiple: 1.25  # layer channel multiple
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 9
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)

   [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]],  # Detect(P3, P4, P5)
  ]
infer/yolov5/models/tf.py
ADDED
@@ -0,0 +1,608 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
2 |
+
"""
|
3 |
+
TensorFlow, Keras and TFLite versions of YOLOv5
|
4 |
+
Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127
|
5 |
+
|
6 |
+
Usage:
|
7 |
+
$ python models/tf.py --weights yolov5s.pt
|
8 |
+
|
9 |
+
Export:
|
10 |
+
$ yolov5 export --weights yolov5s.pt --include saved_model pb tflite tfjs
|
11 |
+
"""
|
12 |
+
|
13 |
+
import argparse
|
14 |
+
import sys
|
15 |
+
from copy import deepcopy
|
16 |
+
from pathlib import Path
|
17 |
+
|
18 |
+
FILE = Path(__file__).resolve()
|
19 |
+
ROOT = FILE.parents[1] # YOLOv5 root directory
|
20 |
+
if str(ROOT) not in sys.path:
|
21 |
+
sys.path.append(str(ROOT)) # add ROOT to PATH
|
22 |
+
# ROOT = ROOT.relative_to(Path.cwd()) # relative
|
23 |
+
|
24 |
+
import numpy as np
|
25 |
+
import tensorflow as tf
|
26 |
+
import torch
|
27 |
+
import torch.nn as nn
|
28 |
+
from tensorflow import keras
|
29 |
+
|
30 |
+
from infer.yolov5.models.common import (C3, SPP, SPPF, Bottleneck, BottleneckCSP, C3x, Concat, Conv, CrossConv, DWConv,
|
31 |
+
DWConvTranspose2d, Focus, autopad)
|
32 |
+
from infer.yolov5.models.experimental import MixConv2d, attempt_load
|
33 |
+
from infer.yolov5.models.yolo import Detect, Segment
|
34 |
+
from infer.yolov5.utils.activations import SiLU
|
35 |
+
from infer.yolov5.utils.general import LOGGER, make_divisible, print_args
|
36 |
+
|
37 |
+
|
38 |
+
class TFBN(keras.layers.Layer):
|
39 |
+
# TensorFlow BatchNormalization wrapper
|
40 |
+
def __init__(self, w=None):
|
41 |
+
super().__init__()
|
42 |
+
self.bn = keras.layers.BatchNormalization(
|
43 |
+
beta_initializer=keras.initializers.Constant(w.bias.numpy()),
|
44 |
+
gamma_initializer=keras.initializers.Constant(w.weight.numpy()),
|
45 |
+
moving_mean_initializer=keras.initializers.Constant(w.running_mean.numpy()),
|
46 |
+
moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()),
|
47 |
+
epsilon=w.eps)
|
48 |
+
|
49 |
+
def call(self, inputs):
|
50 |
+
return self.bn(inputs)
|
51 |
+
|
52 |
+
|
53 |
+
class TFPad(keras.layers.Layer):
|
54 |
+
# Pad inputs in spatial dimensions 1 and 2
|
55 |
+
def __init__(self, pad):
|
56 |
+
super().__init__()
|
57 |
+
if isinstance(pad, int):
|
58 |
+
self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])
|
59 |
+
else: # tuple/list
|
60 |
+
self.pad = tf.constant([[0, 0], [pad[0], pad[0]], [pad[1], pad[1]], [0, 0]])
|
61 |
+
|
62 |
+
def call(self, inputs):
|
63 |
+
return tf.pad(inputs, self.pad, mode='constant', constant_values=0)
|
64 |
+
|
65 |
+
|
66 |
+
class TFConv(keras.layers.Layer):
|
67 |
+
# Standard convolution
|
68 |
+
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
|
69 |
+
# ch_in, ch_out, weights, kernel, stride, padding, groups
|
70 |
+
super().__init__()
|
71 |
+
assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
|
72 |
+
# TensorFlow convolution padding is inconsistent with PyTorch (e.g. k=3 s=2 'SAME' padding)
|
73 |
+
# see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch
|
74 |
+
conv = keras.layers.Conv2D(
|
75 |
+
filters=c2,
|
76 |
+
kernel_size=k,
|
77 |
+
strides=s,
|
78 |
+
padding='SAME' if s == 1 else 'VALID',
|
79 |
+
use_bias=not hasattr(w, 'bn'),
|
80 |
+
kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()),
|
81 |
+
bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy()))
|
82 |
+
self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])
|
83 |
+
self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity
|
84 |
+
self.act = activations(w.act) if act else tf.identity
|
85 |
+
|
86 |
+
def call(self, inputs):
|
87 |
+
return self.act(self.bn(self.conv(inputs)))
|
88 |
+
|
89 |
+
|
90 |
+
class TFDWConv(keras.layers.Layer):
|
91 |
+
# Depthwise convolution
|
92 |
+
def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None):
|
93 |
+
# ch_in, ch_out, weights, kernel, stride, padding, groups
|
94 |
+
super().__init__()
|
95 |
+
assert c2 % c1 == 0, f'TFDWConv() output={c2} must be a multiple of input={c1} channels'
|
96 |
+
conv = keras.layers.DepthwiseConv2D(
|
97 |
+
kernel_size=k,
|
98 |
+
depth_multiplier=c2 // c1,
|
99 |
+
strides=s,
|
100 |
+
padding='SAME' if s == 1 else 'VALID',
|
101 |
+
use_bias=not hasattr(w, 'bn'),
|
102 |
+
depthwise_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()),
|
103 |
+
bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy()))
|
104 |
+
self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])
|
105 |
+
self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity
|
106 |
+
self.act = activations(w.act) if act else tf.identity
|
107 |
+
|
108 |
+
def call(self, inputs):
|
109 |
+
return self.act(self.bn(self.conv(inputs)))
|
110 |
+
|
111 |
+
|
112 |
+
class TFDWConvTranspose2d(keras.layers.Layer):
|
113 |
+
# Depthwise ConvTranspose2d
|
114 |
+
def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None):
|
115 |
+
# ch_in, ch_out, weights, kernel, stride, padding, groups
|
116 |
+
super().__init__()
|
117 |
+
assert c1 == c2, f'TFDWConv() output={c2} must be equal to input={c1} channels'
|
118 |
+
assert k == 4 and p1 == 1, 'TFDWConv() only valid for k=4 and p1=1'
|
119 |
+
weight, bias = w.weight.permute(2, 3, 1, 0).numpy(), w.bias.numpy()
|
120 |
+
self.c1 = c1
|
121 |
+
self.conv = [
|
122 |
+
keras.layers.Conv2DTranspose(filters=1,
|
123 |
+
kernel_size=k,
|
124 |
+
strides=s,
|
125 |
+
padding='VALID',
|
126 |
+
output_padding=p2,
|
127 |
+
use_bias=True,
|
128 |
+
kernel_initializer=keras.initializers.Constant(weight[..., i:i + 1]),
|
129 |
+
bias_initializer=keras.initializers.Constant(bias[i])) for i in range(c1)]
|
130 |
+
|
131 |
+
def call(self, inputs):
|
132 |
+
return tf.concat([m(x) for m, x in zip(self.conv, tf.split(inputs, self.c1, 3))], 3)[:, 1:-1, 1:-1]
|
133 |
+
|
134 |
+
|
135 |
+
class TFFocus(keras.layers.Layer):
|
136 |
+
# Focus wh information into c-space
|
137 |
+
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
|
138 |
+
# ch_in, ch_out, kernel, stride, padding, groups
|
139 |
+
super().__init__()
|
140 |
+
self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv)
|
141 |
+
|
142 |
+
def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c)
|
143 |
+
# inputs = inputs / 255 # normalize 0-255 to 0-1
|
144 |
+
inputs = [inputs[:, ::2, ::2, :], inputs[:, 1::2, ::2, :], inputs[:, ::2, 1::2, :], inputs[:, 1::2, 1::2, :]]
|
145 |
+
return self.conv(tf.concat(inputs, 3))
|
146 |
+
|
147 |
+
|
148 |
+
class TFBottleneck(keras.layers.Layer):
|
149 |
+
# Standard bottleneck
|
150 |
+
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, shortcut, groups, expansion
|
151 |
+
super().__init__()
|
152 |
+
c_ = int(c2 * e) # hidden channels
|
153 |
+
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
|
154 |
+
self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2)
|
155 |
+
self.add = shortcut and c1 == c2
|
156 |
+
|
157 |
+
def call(self, inputs):
|
158 |
+
return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))
|
159 |
+
|
160 |
+
|
161 |
+
class TFCrossConv(keras.layers.Layer):
|
162 |
+
# Cross Convolution
|
163 |
+
def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None):
|
164 |
+
super().__init__()
|
165 |
+
c_ = int(c2 * e) # hidden channels
|
166 |
+
self.cv1 = TFConv(c1, c_, (1, k), (1, s), w=w.cv1)
|
167 |
+
self.cv2 = TFConv(c_, c2, (k, 1), (s, 1), g=g, w=w.cv2)
|
168 |
+
self.add = shortcut and c1 == c2
|
169 |
+
|
170 |
+
def call(self, inputs):
|
171 |
+
return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))
|
172 |
+
|
173 |
+
|
174 |
+
class TFConv2d(keras.layers.Layer):
|
175 |
+
# Substitution for PyTorch nn.Conv2D
|
176 |
+
def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):
|
177 |
+
super().__init__()
|
178 |
+
assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
|
179 |
+
self.conv = keras.layers.Conv2D(filters=c2,
|
180 |
+
kernel_size=k,
|
181 |
+
strides=s,
|
182 |
+
padding='VALID',
|
183 |
+
use_bias=bias,
|
184 |
+
kernel_initializer=keras.initializers.Constant(
|
185 |
+
w.weight.permute(2, 3, 1, 0).numpy()),
|
186 |
+
bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None)
|
187 |
+
|
188 |
+
def call(self, inputs):
|
189 |
+
return self.conv(inputs)
|
190 |
+
|
191 |
+
|
192 |
+
class TFBottleneckCSP(keras.layers.Layer):
|
193 |
+
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
|
194 |
+
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
|
195 |
+
# ch_in, ch_out, number, shortcut, groups, expansion
|
196 |
+
super().__init__()
|
197 |
+
c_ = int(c2 * e) # hidden channels
|
198 |
+
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
|
199 |
+
self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2)
|
200 |
+
self.cv3 = TFConv2d(c_, c_, 1, 1, bias=False, w=w.cv3)
|
201 |
+
self.cv4 = TFConv(2 * c_, c2, 1, 1, w=w.cv4)
|
202 |
+
self.bn = TFBN(w.bn)
|
203 |
+
self.act = lambda x: keras.activations.swish(x)
|
204 |
+
self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])
|
205 |
+
|
206 |
+
def call(self, inputs):
|
207 |
+
y1 = self.cv3(self.m(self.cv1(inputs)))
|
208 |
+
y2 = self.cv2(inputs)
|
209 |
+
return self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3))))
|
210 |
+
|
211 |
+
|
212 |
+
class TFC3(keras.layers.Layer):
|
213 |
+
# CSP Bottleneck with 3 convolutions
|
214 |
+
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
|
215 |
+
# ch_in, ch_out, number, shortcut, groups, expansion
|
216 |
+
super().__init__()
|
217 |
+
c_ = int(c2 * e) # hidden channels
|
218 |
+
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
|
219 |
+
self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)
|
220 |
+
self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3)
|
221 |
+
self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])
|
222 |
+
|
223 |
+
def call(self, inputs):
|
224 |
+
return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3))
|
225 |
+
|
226 |
+
|
227 |
+
class TFC3x(keras.layers.Layer):
|
228 |
+
# 3 module with cross-convolutions
|
229 |
+
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
|
230 |
+
# ch_in, ch_out, number, shortcut, groups, expansion
|
231 |
+
super().__init__()
|
232 |
+
c_ = int(c2 * e) # hidden channels
|
233 |
+
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
|
234 |
+
self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)
|
235 |
+
self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3)
|
236 |
+
self.m = keras.Sequential([
|
237 |
+
TFCrossConv(c_, c_, k=3, s=1, g=g, e=1.0, shortcut=shortcut, w=w.m[j]) for j in range(n)])
|
238 |
+
|
239 |
+
def call(self, inputs):
|
240 |
+
return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3))
|
241 |
+
|
242 |
+
|
243 |
+
class TFSPP(keras.layers.Layer):
|
244 |
+
# Spatial pyramid pooling layer used in YOLOv3-SPP
|
245 |
+
def __init__(self, c1, c2, k=(5, 9, 13), w=None):
|
246 |
+
super().__init__()
|
247 |
+
c_ = c1 // 2 # hidden channels
|
248 |
+
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
|
249 |
+
self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2)
|
250 |
+
self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding='SAME') for x in k]
|
251 |
+
|
252 |
+
def call(self, inputs):
|
253 |
+
x = self.cv1(inputs)
|
254 |
+
return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3))
|
255 |
+
|
256 |
+
|
257 |
+
class TFSPPF(keras.layers.Layer):
|
258 |
+
# Spatial pyramid pooling-Fast layer
|
259 |
+
def __init__(self, c1, c2, k=5, w=None):
|
260 |
+
super().__init__()
|
261 |
+
c_ = c1 // 2 # hidden channels
|
262 |
+
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
|
263 |
+
self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2)
|
264 |
+
self.m = keras.layers.MaxPool2D(pool_size=k, strides=1, padding='SAME')
|
265 |
+
|
266 |
+
def call(self, inputs):
|
267 |
+
x = self.cv1(inputs)
|
268 |
+
y1 = self.m(x)
|
269 |
+
y2 = self.m(y1)
|
270 |
+
return self.cv2(tf.concat([x, y1, y2, self.m(y2)], 3))
|
271 |
+
|
272 |
+
|
273 |
+
class TFDetect(keras.layers.Layer):
|
274 |
+
# TF YOLOv5 Detect layer
|
275 |
+
def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): # detection layer
|
276 |
+
super().__init__()
|
277 |
+
self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32)
|
278 |
+
self.nc = nc # number of classes
|
279 |
+
self.no = nc + 5 # number of outputs per anchor
|
280 |
+
self.nl = len(anchors) # number of detection layers
|
281 |
+
self.na = len(anchors[0]) // 2 # number of anchors
|
282 |
+
self.grid = [tf.zeros(1)] * self.nl # init grid
|
283 |
+
self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32)
|
284 |
+
self.anchor_grid = tf.reshape(self.anchors * tf.reshape(self.stride, [self.nl, 1, 1]), [self.nl, 1, -1, 1, 2])
|
285 |
+
self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)]
|
286 |
+
self.training = False # set to False after building model
|
287 |
+
self.imgsz = imgsz
|
288 |
+
for i in range(self.nl):
|
289 |
+
ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i]
|
290 |
+
self.grid[i] = self._make_grid(nx, ny)
|
291 |
+
|
292 |
+
def call(self, inputs):
|
293 |
+
z = [] # inference output
|
294 |
+
x = []
|
295 |
+
for i in range(self.nl):
|
296 |
+
x.append(self.m[i](inputs[i]))
|
297 |
+
# x(bs,20,20,255) to x(bs,3,20,20,85)
|
298 |
+
ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i]
|
299 |
+
x[i] = tf.reshape(x[i], [-1, ny * nx, self.na, self.no])
|
300 |
+
|
301 |
+
if not self.training: # inference
|
302 |
+
y = x[i]
|
303 |
+
grid = tf.transpose(self.grid[i], [0, 2, 1, 3]) - 0.5
|
304 |
+
anchor_grid = tf.transpose(self.anchor_grid[i], [0, 2, 1, 3]) * 4
|
305 |
+
xy = (tf.sigmoid(y[..., 0:2]) * 2 + grid) * self.stride[i] # xy
|
306 |
+
wh = tf.sigmoid(y[..., 2:4]) ** 2 * anchor_grid
|
307 |
+
# Normalize xywh to 0-1 to reduce calibration error
|
308 |
+
xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)
|
309 |
+
wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)
|
310 |
+
y = tf.concat([xy, wh, tf.sigmoid(y[..., 4:5 + self.nc]), y[..., 5 + self.nc:]], -1)
|
311 |
+
z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no]))
|
312 |
+
|
313 |
+
return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1),)
|
314 |
+
|
315 |
+
@staticmethod
|
316 |
+
def _make_grid(nx=20, ny=20):
|
317 |
+
# yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
|
318 |
+
# return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
|
319 |
+
xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny))
|
320 |
+
return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32)
|
321 |
+
|
322 |
+
|
323 |
+
class TFSegment(TFDetect):
|
324 |
+
# YOLOv5 Segment head for segmentation models
|
325 |
+
def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w=None):
|
326 |
+
super().__init__(nc, anchors, ch, imgsz, w)
|
327 |
+
self.nm = nm # number of masks
|
328 |
+
self.npr = npr # number of protos
|
329 |
+
self.no = 5 + nc + self.nm # number of outputs per anchor
|
330 |
+
self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] # output conv
|
331 |
+
self.proto = TFProto(ch[0], self.npr, self.nm, w=w.proto) # protos
|
332 |
+
self.detect = TFDetect.call
|
333 |
+
|
334 |
+
def call(self, x):
|
335 |
+
p = self.proto(x[0])
|
336 |
+
# p = TFUpsample(None, scale_factor=4, mode='nearest')(self.proto(x[0])) # (optional) full-size protos
|
337 |
+
p = tf.transpose(p, [0, 3, 1, 2]) # from shape(1,160,160,32) to shape(1,32,160,160)
|
338 |
+
x = self.detect(self, x)
|
339 |
+
return (x, p) if self.training else (x[0], p)
|
340 |
+
|
341 |
+
|
342 |
+
class TFProto(keras.layers.Layer):
|
343 |
+
|
344 |
+
def __init__(self, c1, c_=256, c2=32, w=None):
|
345 |
+
super().__init__()
|
346 |
+
self.cv1 = TFConv(c1, c_, k=3, w=w.cv1)
|
347 |
+
self.upsample = TFUpsample(None, scale_factor=2, mode='nearest')
|
348 |
+
self.cv2 = TFConv(c_, c_, k=3, w=w.cv2)
|
349 |
+
self.cv3 = TFConv(c_, c2, w=w.cv3)
|
350 |
+
|
351 |
+
def call(self, inputs):
|
352 |
+
return self.cv3(self.cv2(self.upsample(self.cv1(inputs))))
|
353 |
+
|
354 |
+
|
355 |
+
class TFUpsample(keras.layers.Layer):
|
356 |
+
# TF version of torch.nn.Upsample()
|
357 |
+
def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w'
|
358 |
+
super().__init__()
|
359 |
+
assert scale_factor % 2 == 0, 'scale_factor must be multiple of 2'
|
360 |
+
self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), mode)
|
361 |
+
# self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode)
|
362 |
+
# with default arguments: align_corners=False, half_pixel_centers=False
|
363 |
+
# self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x,
|
364 |
+
# size=(x.shape[1] * 2, x.shape[2] * 2))
|
365 |
+
|
366 |
+
def call(self, inputs):
|
367 |
+
return self.upsample(inputs)
|
368 |
+
|
369 |
+
|
370 |
+
class TFConcat(keras.layers.Layer):
|
371 |
+
# TF version of torch.concat()
|
372 |
+
def __init__(self, dimension=1, w=None):
|
373 |
+
super().__init__()
|
374 |
+
assert dimension == 1, 'convert only NCHW to NHWC concat'
|
375 |
+
self.d = 3
|
376 |
+
|
377 |
+
def call(self, inputs):
|
378 |
+
return tf.concat(inputs, self.d)
|
379 |
+
|
380 |
+
|
381 |
+
def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3)
|
382 |
+
LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}")
|
383 |
+
anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
|
384 |
+
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
|
385 |
+
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
|
386 |
+
|
387 |
+
layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
|
388 |
+
for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
|
389 |
+
m_str = m
|
390 |
+
m = eval(m) if isinstance(m, str) else m # eval strings
|
391 |
+
for j, a in enumerate(args):
|
392 |
+
try:
|
393 |
+
args[j] = eval(a) if isinstance(a, str) else a # eval strings
|
394 |
+
except NameError:
|
395 |
+
pass
|
396 |
+
|
397 |
+
n = max(round(n * gd), 1) if n > 1 else n # depth gain
|
398 |
+
if m in [
|
399 |
+
nn.Conv2d, Conv, DWConv, DWConvTranspose2d, Bottleneck, SPP, SPPF, MixConv2d, Focus, CrossConv,
|
400 |
+
BottleneckCSP, C3, C3x]:
|
401 |
+
c1, c2 = ch[f], args[0]
|
402 |
+
c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
|
403 |
+
|
404 |
+
args = [c1, c2, *args[1:]]
|
405 |
+
if m in [BottleneckCSP, C3, C3x]:
|
406 |
+
args.insert(2, n)
|
407 |
+
n = 1
|
408 |
+
elif m is nn.BatchNorm2d:
|
409 |
+
args = [ch[f]]
|
410 |
+
elif m is Concat:
|
411 |
+
c2 = sum(ch[-1 if x == -1 else x + 1] for x in f)
|
412 |
+
elif m in [Detect, Segment]:
|
413 |
+
args.append([ch[x + 1] for x in f])
|
414 |
+
if isinstance(args[1], int): # number of anchors
|
415 |
+
args[1] = [list(range(args[1] * 2))] * len(f)
|
416 |
+
if m is Segment:
|
417 |
+
args[3] = make_divisible(args[3] * gw, 8)
|
418 |
+
args.append(imgsz)
|
419 |
+
else:
|
420 |
+
c2 = ch[f]
|
421 |
+
|
422 |
+
tf_m = eval('TF' + m_str.replace('nn.', ''))
|
423 |
+
m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \
|
424 |
+
else tf_m(*args, w=model.model[i]) # module
|
425 |
+
|
426 |
+
torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module
|
427 |
+
t = str(m)[8:-2].replace('__main__.', '') # module type
|
428 |
+
np = sum(x.numel() for x in torch_m_.parameters()) # number params
|
429 |
+
m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
|
430 |
+
LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10} {t:<40}{str(args):<30}') # print
|
431 |
+
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
|
432 |
+
layers.append(m_)
|
433 |
+
ch.append(c2)
|
434 |
+
return keras.Sequential(layers), sorted(save)
|
435 |
+
|
436 |
+
|
437 |
+
class TFModel:
|
438 |
+
# TF YOLOv5 model
|
439 |
+
def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes
|
440 |
+
super().__init__()
|
441 |
+
if isinstance(cfg, dict):
|
442 |
+
self.yaml = cfg # model dict
|
443 |
+
else: # is *.yaml
|
444 |
+
import yaml # for torch hub
|
445 |
+
self.yaml_file = Path(cfg).name
|
446 |
+
with open(cfg) as f:
|
447 |
+
self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict
|
448 |
+
|
449 |
+
# Define model
|
450 |
+
if nc and nc != self.yaml['nc']:
|
451 |
+
LOGGER.info(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}")
|
452 |
+
self.yaml['nc'] = nc # override yaml value
|
453 |
+
self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz)
|
454 |
+
|
455 |
+
def predict(self,
|
456 |
+
inputs,
|
457 |
+
tf_nms=False,
|
458 |
+
agnostic_nms=False,
|
459 |
+
topk_per_class=100,
|
460 |
+
topk_all=100,
|
461 |
+
iou_thres=0.45,
|
462 |
+
conf_thres=0.25):
|
463 |
+
y = [] # outputs
|
464 |
+
x = inputs
|
465 |
+
for m in self.model.layers:
|
466 |
+
if m.f != -1: # if not from previous layer
|
467 |
+
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
|
468 |
+
|
469 |
+
x = m(x) # run
|
470 |
+
y.append(x if m.i in self.savelist else None) # save output
|
471 |
+
|
472 |
+
# Add TensorFlow NMS
|
+        if tf_nms:
+            boxes = self._xywh2xyxy(x[0][..., :4])
+            probs = x[0][:, :, 4:5]
+            classes = x[0][:, :, 5:]
+            scores = probs * classes
+            if agnostic_nms:
+                nms = AgnosticNMS()((boxes, classes, scores), topk_all, iou_thres, conf_thres)
+            else:
+                boxes = tf.expand_dims(boxes, 2)
+                nms = tf.image.combined_non_max_suppression(boxes,
+                                                            scores,
+                                                            topk_per_class,
+                                                            topk_all,
+                                                            iou_thres,
+                                                            conf_thres,
+                                                            clip_boxes=False)
+            return (nms,)
+        return x  # output [1,6300,85] = [xywh, conf, class0, class1, ...]
+        # x = x[0]  # [x(1,6300,85), ...] to x(6300,85)
+        # xywh = x[..., :4]  # x(6300,4) boxes
+        # conf = x[..., 4:5]  # x(6300,1) confidences
+        # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1))  # x(6300,1) classes
+        # return tf.concat([conf, cls, xywh], 1)
+
+    @staticmethod
+    def _xywh2xyxy(xywh):
+        # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
+        x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1)
+        return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1)
+
+
+class AgnosticNMS(keras.layers.Layer):
+    # TF Agnostic NMS
+    def call(self, input, topk_all, iou_thres, conf_thres):
+        # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450
+        return tf.map_fn(lambda x: self._nms(x, topk_all, iou_thres, conf_thres),
+                         input,
+                         fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32),
+                         name='agnostic_nms')
+
+    @staticmethod
+    def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25):  # agnostic NMS
+        boxes, classes, scores = x
+        class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32)
+        scores_inp = tf.reduce_max(scores, -1)
+        selected_inds = tf.image.non_max_suppression(boxes,
+                                                     scores_inp,
+                                                     max_output_size=topk_all,
+                                                     iou_threshold=iou_thres,
+                                                     score_threshold=conf_thres)
+        selected_boxes = tf.gather(boxes, selected_inds)
+        padded_boxes = tf.pad(selected_boxes,
+                              paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]],
+                              mode='CONSTANT',
+                              constant_values=0.0)
+        selected_scores = tf.gather(scores_inp, selected_inds)
+        padded_scores = tf.pad(selected_scores,
+                               paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],
+                               mode='CONSTANT',
+                               constant_values=-1.0)
+        selected_classes = tf.gather(class_inds, selected_inds)
+        padded_classes = tf.pad(selected_classes,
+                                paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],
+                                mode='CONSTANT',
+                                constant_values=-1.0)
+        valid_detections = tf.shape(selected_inds)[0]
+        return padded_boxes, padded_scores, padded_classes, valid_detections
+
+
+def activations(act=nn.SiLU):
+    # Returns TF activation from input PyTorch activation
+    if isinstance(act, nn.LeakyReLU):
+        return lambda x: keras.activations.relu(x, alpha=0.1)
+    elif isinstance(act, nn.Hardswish):
+        return lambda x: x * tf.nn.relu6(x + 3) * 0.166666667
+    elif isinstance(act, (nn.SiLU, SiLU)):
+        return lambda x: keras.activations.swish(x)
+    else:
+        raise Exception(f'no matching TensorFlow activation found for PyTorch activation {act}')
+
+
+def representative_dataset_gen(dataset, ncalib=100):
+    # Representative dataset generator for use with converter.representative_dataset, returns a generator of np arrays
+    for n, (path, img, im0s, vid_cap, string) in enumerate(dataset):
+        im = np.transpose(img, [1, 2, 0])
+        im = np.expand_dims(im, axis=0).astype(np.float32)
+        im /= 255
+        yield [im]
+        if n >= ncalib:
+            break
+
+
+def run(
+        weights=ROOT / 'yolov5s.pt',  # weights path
+        imgsz=(640, 640),  # inference size h,w
+        batch_size=1,  # batch size
+        dynamic=False,  # dynamic batch size
+):
+    # PyTorch model
+    im = torch.zeros((batch_size, 3, *imgsz))  # BCHW image
+    model = attempt_load(weights, device=torch.device('cpu'), inplace=True, fuse=False)
+    _ = model(im)  # inference
+    model.info()
+
+    # TensorFlow model
+    im = tf.zeros((batch_size, *imgsz, 3))  # BHWC image
+    tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
+    _ = tf_model.predict(im)  # inference
+
+    # Keras model
+    im = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size)
+    keras_model = keras.Model(inputs=im, outputs=tf_model.predict(im))
+    keras_model.summary()
+
+    LOGGER.info('PyTorch, TensorFlow and Keras models successfully verified.\nUse export.py for TF model export.')
+
+
+def parse_opt():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')
+    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
+    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
+    parser.add_argument('--dynamic', action='store_true', help='dynamic batch size')
+    opt = parser.parse_args()
+    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
+    print_args(vars(opt))
+    return opt
+
+
+def main():
+    opt = parse_opt()
+    run(**vars(opt))
+
+
+if __name__ == "__main__":
+    main()
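Note: the `_xywh2xyxy` helper above is the standard center-format to corner-format box conversion. A minimal NumPy sketch (not part of the commit) that mirrors it and checks the round trip:

import numpy as np

def xywh2xyxy(xywh):
    # [x_center, y_center, w, h] -> [x1, y1, x2, y2] (top-left, bottom-right)
    x, y, w, h = np.split(xywh, 4, axis=-1)
    return np.concatenate([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1)

def xyxy2xywh(xyxy):
    # inverse conversion, used here only to verify the round trip
    x1, y1, x2, y2 = np.split(xyxy, 4, axis=-1)
    return np.concatenate([(x1 + x2) / 2, (y1 + y2) / 2, x2 - x1, y2 - y1], axis=-1)

boxes = np.array([[320.0, 240.0, 100.0, 50.0]])  # one box in xywh
assert np.allclose(xyxy2xywh(xywh2xyxy(boxes)), boxes)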
infer/yolov5/models/yolo.py
ADDED
@@ -0,0 +1,391 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+YOLO-specific modules
+
+Usage:
+    $ python models/yolo.py --cfg yolov5s.yaml
+"""
+
+import argparse
+import contextlib
+import os
+import platform
+import sys
+from copy import deepcopy
+from pathlib import Path
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+if platform.system() != 'Windows':
+    ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+from infer.yolov5.models.common import *
+from infer.yolov5.models.experimental import *
+from infer.yolov5.utils.autoanchor import check_anchor_order
+from infer.yolov5.utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args
+from infer.yolov5.utils.plots import feature_visualization
+from infer.yolov5.utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device,
+                                            time_sync)
+
+try:
+    import thop  # for FLOPs computation
+except ImportError:
+    thop = None
+
+
+class Detect(nn.Module):
+    # YOLOv5 Detect head for detection models
+    stride = None  # strides computed during build
+    dynamic = False  # force grid reconstruction
+    export = False  # export mode
+
+    def __init__(self, nc=80, anchors=(), ch=(), inplace=True):  # detection layer
+        super().__init__()
+        self.nc = nc  # number of classes
+        self.no = nc + 5  # number of outputs per anchor
+        self.nl = len(anchors)  # number of detection layers
+        self.na = len(anchors[0]) // 2  # number of anchors
+        self.grid = [torch.empty(0) for _ in range(self.nl)]  # init grid
+        self.anchor_grid = [torch.empty(0) for _ in range(self.nl)]  # init anchor grid
+        self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2))  # shape(nl,na,2)
+        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv
+        self.inplace = inplace  # use inplace ops (e.g. slice assignment)
+
+    def forward(self, x):
+        z = []  # inference output
+        for i in range(self.nl):
+            x[i] = self.m[i](x[i])  # conv
+            bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
+            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
+
+            if not self.training:  # inference
+                if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:
+                    self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i)
+
+                if isinstance(self, Segment):  # (boxes + masks)
+                    xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4)
+                    xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i]  # xy
+                    wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i]  # wh
+                    y = torch.cat((xy, wh, conf.sigmoid(), mask), 4)
+                else:  # Detect (boxes only)
+                    xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4)
+                    xy = (xy * 2 + self.grid[i]) * self.stride[i]  # xy
+                    wh = (wh * 2) ** 2 * self.anchor_grid[i]  # wh
+                    y = torch.cat((xy, wh, conf), 4)
+                z.append(y.view(bs, self.na * nx * ny, self.no))
+
+        return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x)
+
+    def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')):
+        d = self.anchors[i].device
+        t = self.anchors[i].dtype
+        shape = 1, self.na, ny, nx, 2  # grid shape
+        y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t)
+        yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x)  # torch>=0.7 compatibility
+        grid = torch.stack((xv, yv), 2).expand(shape) - 0.5  # add grid offset, i.e. y = 2.0 * x - 0.5
+        anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape)
+        return grid, anchor_grid
+
+
+class Segment(Detect):
+    # YOLOv5 Segment head for segmentation models
+    def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True):
+        super().__init__(nc, anchors, ch, inplace)
+        self.nm = nm  # number of masks
+        self.npr = npr  # number of protos
+        self.no = 5 + nc + self.nm  # number of outputs per anchor
+        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv
+        self.proto = Proto(ch[0], self.npr, self.nm)  # protos
+        self.detect = Detect.forward
+
+    def forward(self, x):
+        p = self.proto(x[0])
+        x = self.detect(self, x)
+        return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1])
+
+
+class BaseModel(nn.Module):
+    # YOLOv5 base model
+    def forward(self, x, profile=False, visualize=False):
+        return self._forward_once(x, profile, visualize)  # single-scale inference, train
+
+    def _forward_once(self, x, profile=False, visualize=False):
+        y, dt = [], []  # outputs
+        for m in self.model:
+            if m.f != -1:  # if not from previous layer
+                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
+            if profile:
+                self._profile_one_layer(m, x, dt)
+            x = m(x)  # run
+            y.append(x if m.i in self.save else None)  # save output
+            if visualize:
+                feature_visualization(x, m.type, m.i, save_dir=visualize)
+        return x
+
+    def _profile_one_layer(self, m, x, dt):
+        c = m == self.model[-1]  # is final layer, copy input as inplace fix
+        o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPs
+        t = time_sync()
+        for _ in range(10):
+            m(x.copy() if c else x)
+        dt.append((time_sync() - t) * 100)
+        if m == self.model[0]:
+            LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module")
+        LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}')
+        if c:
+            LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total")
+
+    def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
+        LOGGER.info('Fusing layers... ')
+        for m in self.model.modules():
+            if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'):
+                m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
+                delattr(m, 'bn')  # remove batchnorm
+                m.forward = m.forward_fuse  # update forward
+        self.info()
+        return self
+
+    def info(self, verbose=False, img_size=640):  # print model information
+        model_info(self, verbose, img_size)
+
+    def _apply(self, fn):
+        # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
+        self = super()._apply(fn)
+        m = self.model[-1]  # Detect()
+        if isinstance(m, (Detect, Segment)):
+            m.stride = fn(m.stride)
+            m.grid = list(map(fn, m.grid))
+            if isinstance(m.anchor_grid, list):
+                m.anchor_grid = list(map(fn, m.anchor_grid))
+        return self
+
+
+class DetectionModel(BaseModel):
+    # YOLOv5 detection model
+    def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None):  # model, input channels, number of classes
+        super().__init__()
+        if isinstance(cfg, dict):
+            self.yaml = cfg  # model dict
+        else:  # is *.yaml
+            import yaml  # for torch hub
+            self.yaml_file = Path(cfg).name
+            with open(cfg, encoding='ascii', errors='ignore') as f:
+                self.yaml = yaml.safe_load(f)  # model dict
+
+        # Define model
+        ch = self.yaml['ch'] = self.yaml.get('ch', ch)  # input channels
+        if nc and nc != self.yaml['nc']:
+            LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
+            self.yaml['nc'] = nc  # override yaml value
+        if anchors:
+            LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}')
+            self.yaml['anchors'] = round(anchors)  # override yaml value
+        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
+        self.names = [str(i) for i in range(self.yaml['nc'])]  # default names
+        self.inplace = self.yaml.get('inplace', True)
+
+        # Build strides, anchors
+        m = self.model[-1]  # Detect()
+        if isinstance(m, (Detect, Segment)):
+            s = 256  # 2x min stride
+            m.inplace = self.inplace
+            forward = lambda x: self.forward(x)[0] if isinstance(m, Segment) else self.forward(x)
+            m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))])  # forward
+            check_anchor_order(m)
+            m.anchors /= m.stride.view(-1, 1, 1)
+            self.stride = m.stride
+            self._initialize_biases()  # only run once
+
+        # Init weights, biases
+        initialize_weights(self)
+        self.info()
+        LOGGER.info('')
+
+    def forward(self, x, augment=False, profile=False, visualize=False):
+        if augment:
+            return self._forward_augment(x)  # augmented inference, None
+        return self._forward_once(x, profile, visualize)  # single-scale inference, train
+
+    def _forward_augment(self, x):
+        img_size = x.shape[-2:]  # height, width
+        s = [1, 0.83, 0.67]  # scales
+        f = [None, 3, None]  # flips (2-ud, 3-lr)
+        y = []  # outputs
+        for si, fi in zip(s, f):
+            xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
+            yi = self._forward_once(xi)[0]  # forward
+            # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1])  # save
+            yi = self._descale_pred(yi, fi, si, img_size)
+            y.append(yi)
+        y = self._clip_augmented(y)  # clip augmented tails
+        return torch.cat(y, 1), None  # augmented inference, train
+
+    def _descale_pred(self, p, flips, scale, img_size):
+        # de-scale predictions following augmented inference (inverse operation)
+        if self.inplace:
+            p[..., :4] /= scale  # de-scale
+            if flips == 2:
+                p[..., 1] = img_size[0] - p[..., 1]  # de-flip ud
+            elif flips == 3:
+                p[..., 0] = img_size[1] - p[..., 0]  # de-flip lr
+        else:
+            x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale  # de-scale
+            if flips == 2:
+                y = img_size[0] - y  # de-flip ud
+            elif flips == 3:
+                x = img_size[1] - x  # de-flip lr
+            p = torch.cat((x, y, wh, p[..., 4:]), -1)
+        return p
+
+    def _clip_augmented(self, y):
+        # Clip YOLOv5 augmented inference tails
+        nl = self.model[-1].nl  # number of detection layers (P3-P5)
+        g = sum(4 ** x for x in range(nl))  # grid points
+        e = 1  # exclude layer count
+        i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e))  # indices
+        y[0] = y[0][:, :-i]  # large
+        i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e))  # indices
+        y[-1] = y[-1][:, i:]  # small
+        return y
+
+    def _initialize_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency
+        # https://arxiv.org/abs/1708.02002 section 3.3
+        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
+        m = self.model[-1]  # Detect() module
+        for mi, s in zip(m.m, m.stride):  # from
+            b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
+            b.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
+            b.data[:, 5:5 + m.nc] += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum())  # cls
+            mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+
+
+Model = DetectionModel  # retain YOLOv5 'Model' class for backwards compatibility
+
+
+class SegmentationModel(DetectionModel):
+    # YOLOv5 segmentation model
+    def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None):
+        super().__init__(cfg, ch, nc, anchors)
+
+
+class ClassificationModel(BaseModel):
+    # YOLOv5 classification model
+    def __init__(self, cfg=None, model=None, nc=1000, cutoff=10):  # yaml, model, number of classes, cutoff index
+        super().__init__()
+        self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg)
+
+    def _from_detection_model(self, model, nc=1000, cutoff=10):
+        # Create a YOLOv5 classification model from a YOLOv5 detection model
+        if isinstance(model, DetectMultiBackend):
+            model = model.model  # unwrap DetectMultiBackend
+        model.model = model.model[:cutoff]  # backbone
+        m = model.model[-1]  # last layer
+        ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels  # ch into module
+        c = Classify(ch, nc)  # Classify()
+        c.i, c.f, c.type = m.i, m.f, 'models.common.Classify'  # index, from, type
+        model.model[-1] = c  # replace
+        self.model = model.model
+        self.stride = model.stride
+        self.save = []
+        self.nc = nc
+
+    def _from_yaml(self, cfg):
+        # Create a YOLOv5 classification model from a *.yaml file
+        self.model = None
+
+
+def parse_model(d, ch):  # model_dict, input_channels(3)
+    # Parse a YOLOv5 model.yaml dictionary
+    LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}")
+    anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation')
+    if act:
+        Conv.default_act = eval(act)  # redefine default activation, i.e. Conv.default_act = nn.SiLU()
+        LOGGER.info(f"{colorstr('activation:')} {act}")  # print
+    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
+    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)
+
+    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
+    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
+        m = eval(m) if isinstance(m, str) else m  # eval strings
+        for j, a in enumerate(args):
+            with contextlib.suppress(NameError):
+                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
+
+        n = n_ = max(round(n * gd), 1) if n > 1 else n  # depth gain
+        if m in {
+                Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv,
+                BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}:
+            c1, c2 = ch[f], args[0]
+            if c2 != no:  # if not output
+                c2 = make_divisible(c2 * gw, 8)
+
+            args = [c1, c2, *args[1:]]
+            if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x}:
+                args.insert(2, n)  # number of repeats
+                n = 1
+        elif m is nn.BatchNorm2d:
+            args = [ch[f]]
+        elif m is Concat:
+            c2 = sum(ch[x] for x in f)
+        # TODO: channel, gw, gd
+        elif m in {Detect, Segment}:
+            args.append([ch[x] for x in f])
+            if isinstance(args[1], int):  # number of anchors
+                args[1] = [list(range(args[1] * 2))] * len(f)
+            if m is Segment:
+                args[3] = make_divisible(args[3] * gw, 8)
+        elif m is Contract:
+            c2 = ch[f] * args[0] ** 2
+        elif m is Expand:
+            c2 = ch[f] // args[0] ** 2
+        else:
+            c2 = ch[f]
+
+        m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module
+        t = str(m)[8:-2].replace('__main__.', '')  # module type
+        np = sum(x.numel() for x in m_.parameters())  # number params
+        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
+        LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}')  # print
+        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
+        layers.append(m_)
+        if i == 0:
+            ch = []
+        ch.append(c2)
+    return nn.Sequential(*layers), sorted(save)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
+    parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--profile', action='store_true', help='profile model speed')
+    parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer')
+    parser.add_argument('--test', action='store_true', help='test all yolo*.yaml')
+    opt = parser.parse_args()
+    opt.cfg = check_yaml(opt.cfg)  # check YAML
+    print_args(vars(opt))
+    device = select_device(opt.device)
+
+    # Create model
+    im = torch.rand(opt.batch_size, 3, 640, 640).to(device)
+    model = Model(opt.cfg).to(device)
+
+    # Options
+    if opt.line_profile:  # profile layer by layer
+        model(im, profile=True)
+
+    elif opt.profile:  # profile forward-backward
+        results = profile(input=im, ops=[model], n=3)
+
+    elif opt.test:  # test all models
+        for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'):
+            try:
+                _ = Model(cfg)
+            except Exception as e:
+                print(f'Error in {cfg}: {e}')
+
+    else:  # report fused model summary
+        model.fuse()
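Note: the inference decode in `Detect.forward` above is `xy = (sigmoid(t) * 2 + grid) * stride` and `wh = (sigmoid(t) * 2) ** 2 * anchor`, where `grid` already carries the -0.5 offset baked in by `_make_grid`. A standalone NumPy sketch (not part of the commit; the cell index, stride and anchor values are illustrative):

import numpy as np

def sigmoid(t):
    return 1 / (1 + np.exp(-t))

stride = 8.0                           # P3/8 detection level
grid_xy = np.array([4.0, 7.0]) - 0.5   # cell (4, 7); _make_grid bakes in the -0.5 offset
anchor_wh = np.array([10.0, 13.0])     # first P3 anchor in pixels (anchor_grid = anchors * stride)

t_xy = np.array([0.2, -0.1])           # raw network outputs for one prediction
t_wh = np.array([0.3, 0.0])
xy = (sigmoid(t_xy) * 2 + grid_xy) * stride  # box center in pixels; spans (-0.5, 1.5) cells
wh = (sigmoid(t_wh) * 2) ** 2 * anchor_wh    # box size in pixels; spans (0, 4) anchor sizes
print(xy, wh)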
infer/yolov5/models/yolov5l.yaml
ADDED
@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]
infer/yolov5/models/yolov5m.yaml
ADDED
@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 0.67  # model depth multiple
+width_multiple: 0.75  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]
infer/yolov5/models/yolov5n.yaml
ADDED
@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 0.33  # model depth multiple
+width_multiple: 0.25  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]
infer/yolov5/models/yolov5s.yaml
ADDED
@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 0.33  # model depth multiple
+width_multiple: 0.50  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]
infer/yolov5/models/yolov5x.yaml
ADDED
@@ -0,0 +1,48 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+# Parameters
+nc: 80  # number of classes
+depth_multiple: 1.33  # model depth multiple
+width_multiple: 1.25  # layer channel multiple
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 v6.0 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 6, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
+  ]
+
+# YOLOv5 v6.0 head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]
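Note: the five model yamls above share an identical layer graph and differ only in `depth_multiple` / `width_multiple`, which `parse_model` in models/yolo.py applies as `n = max(round(n * gd), 1)` repeats and `c2 = make_divisible(c2 * gw, 8)` channels. A quick sketch (not part of the commit) of the resulting scale of the deepest backbone stage (nominal n=9 repeats, 1024 channels):

import math

def make_divisible(x, divisor=8):
    # same rounding rule as utils.general.make_divisible
    return math.ceil(x / divisor) * divisor

variants = {'n': (0.33, 0.25), 's': (0.33, 0.50), 'm': (0.67, 0.75),
            'l': (1.00, 1.00), 'x': (1.33, 1.25)}  # (depth_multiple, width_multiple)
for name, (gd, gw) in variants.items():
    repeats = max(round(9 * gd), 1)           # scaled C3 repeat count
    channels = make_divisible(1024 * gw, 8)   # scaled stage output channels
    print(f'yolov5{name}: C3 repeats={repeats}, P5 channels={channels}')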
infer/yolov5/segment/__init__.py
ADDED
File without changes
infer/yolov5/segment/predict.py
ADDED
@@ -0,0 +1,293 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Run YOLOv5 segmentation inference on images, videos, directories, streams, etc.
+
+Usage - sources:
+    $ python segment/predict.py --weights yolov5s-seg.pt --source 0                               # webcam
+                                                                  img.jpg                         # image
+                                                                  vid.mp4                         # video
+                                                                  screen                          # screenshot
+                                                                  path/                           # directory
+                                                                  list.txt                        # list of images
+                                                                  list.streams                    # list of streams
+                                                                  'path/*.jpg'                    # glob
+                                                                  'https://youtu.be/Zgi9g1ksQHc'  # YouTube
+                                                                  'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
+
+Usage - formats:
+    $ python segment/predict.py --weights yolov5s-seg.pt                 # PyTorch
+                                          yolov5s-seg.torchscript        # TorchScript
+                                          yolov5s-seg.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+                                          yolov5s-seg_openvino_model     # OpenVINO
+                                          yolov5s-seg.engine             # TensorRT
+                                          yolov5s-seg.mlmodel            # CoreML (macOS-only)
+                                          yolov5s-seg_saved_model        # TensorFlow SavedModel
+                                          yolov5s-seg.pb                 # TensorFlow GraphDef
+                                          yolov5s-seg.tflite             # TensorFlow Lite
+                                          yolov5s-seg_edgetpu.tflite     # TensorFlow Edge TPU
+                                          yolov5s-seg_paddle_model       # PaddlePaddle
+"""
+
+import argparse
+import os
+import platform
+import sys
+from pathlib import Path
+
+import torch
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+from infer.yolov5.models.common import DetectMultiBackend
+from infer.yolov5.utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
+from infer.yolov5.utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
+                                        increment_path, non_max_suppression, print_args, scale_boxes, scale_segments,
+                                        strip_optimizer)
+from infer.yolov5.utils.plots import Annotator, colors, save_one_box
+from infer.yolov5.utils.segment.general import masks2segments, process_mask, process_mask_native
+from infer.yolov5.utils.torch_utils import select_device, smart_inference_mode
+
+
+@smart_inference_mode()
+def run(
+        weights='yolov5s-seg.pt',  # model.pt path(s)
+        source=ROOT / 'data/images',  # file/dir/URL/glob/screen/0(webcam)
+        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
+        imgsz=None,  # inference size (pixels)
+        img=None,  # inference size (pixels)
+        conf_thres=0.25,  # confidence threshold
+        iou_thres=0.45,  # NMS IOU threshold
+        max_det=1000,  # maximum detections per image
+        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
+        view_img=False,  # show results
+        save_txt=False,  # save results to *.txt
+        save_conf=False,  # save confidences in --save-txt labels
+        save_crop=False,  # save cropped prediction boxes
+        nosave=False,  # do not save images/videos
+        classes=None,  # filter by class: --class 0, or --class 0 2 3
+        agnostic_nms=False,  # class-agnostic NMS
+        augment=False,  # augmented inference
+        visualize=False,  # visualize features
+        update=False,  # update all models
+        project=ROOT / 'runs/predict-seg',  # save results to project/name
+        name='exp',  # save results to project/name
+        exist_ok=False,  # existing project/name ok, do not increment
+        line_thickness=3,  # bounding box thickness (pixels)
+        hide_labels=False,  # hide labels
+        hide_conf=False,  # hide confidences
+        half=False,  # use FP16 half-precision inference
+        dnn=False,  # use OpenCV DNN for ONNX inference
+        vid_stride=1,  # video frame-rate stride
+        retina_masks=False,
+):
+    source = str(source)
+    save_img = not nosave and not source.endswith('.txt')  # save inference images
+    is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
+    is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
+    webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file)
+    screenshot = source.lower().startswith('screen')
+    if is_url and is_file:
+        source = check_file(source)  # download
+
+    if imgsz is None and img is None:
+        imgsz = 640
+    elif img is not None:
+        imgsz = img
+
+    if isinstance(imgsz, int):
+        imgsz = [imgsz, imgsz]
+
+    # Directories
+    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
+    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir
+
+    # Load model
+    device = select_device(device)
+    model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
+    stride, names, pt = model.stride, model.names, model.pt
+    imgsz = check_img_size(imgsz, s=stride)  # check image size
+
+    # Dataloader
+    bs = 1  # batch_size
+    if webcam:
+        view_img = check_imshow(warn=True)
+        dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
+        bs = len(dataset)
+    elif screenshot:
+        dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
+    else:
+        dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
+    vid_path, vid_writer = [None] * bs, [None] * bs
+
+    # Run inference
+    model.warmup(imgsz=(1 if pt else bs, 3, *imgsz))  # warmup
+    seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
+    for path, im, im0s, vid_cap, s in dataset:
+        with dt[0]:
+            im = torch.from_numpy(im).to(model.device)
+            im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
+            im /= 255  # 0 - 255 to 0.0 - 1.0
+            if len(im.shape) == 3:
+                im = im[None]  # expand for batch dim
+
+        # Inference
+        with dt[1]:
+            visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
+            pred, proto = model(im, augment=augment, visualize=visualize)[:2]
+
+        # NMS
+        with dt[2]:
+            pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32)
+
+        # Second-stage classifier (optional)
+        # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
+
+        # Process predictions
+        for i, det in enumerate(pred):  # per image
+            seen += 1
+            if webcam:  # batch_size >= 1
+                p, im0, frame = path[i], im0s[i].copy(), dataset.count
+                s += f'{i}: '
+            else:
+                p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
+
+            p = Path(p)  # to Path
+            save_path = str(save_dir / p.name)  # im.jpg
+            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # im.txt
+            s += '%gx%g ' % im.shape[2:]  # print string
+            imc = im0.copy() if save_crop else im0  # for save_crop
+            annotator = Annotator(im0, line_width=line_thickness, example=str(names))
+            if len(det):
+                if retina_masks:
+                    # scale bbox first the crop masks
+                    det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()  # rescale boxes to im0 size
+                    masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2])  # HWC
+                else:
+                    masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True)  # HWC
+                    det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()  # rescale boxes to im0 size
+
+                # Segments
+                if save_txt:
+                    segments = [
+                        scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True)
+                        for x in reversed(masks2segments(masks))]
+
+                # Print results
+                for c in det[:, 5].unique():
+                    n = (det[:, 5] == c).sum()  # detections per class
+                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
+
+                # Mask plotting
+                annotator.masks(
+                    masks,
+                    colors=[colors(x, True) for x in det[:, 5]],
+                    im_gpu=torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() /
+                    255 if retina_masks else im[i])
+
+                # Write results
+                for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])):
+                    if save_txt:  # Write to file
+                        seg = segments[j].reshape(-1)  # (n,2) to (n*2)
+                        line = (cls, *seg, conf) if save_conf else (cls, *seg)  # label format
+                        with open(f'{txt_path}.txt', 'a') as f:
+                            f.write(('%g ' * len(line)).rstrip() % line + '\n')
+
+                    if save_img or save_crop or view_img:  # Add bbox to image
+                        c = int(cls)  # integer class
+                        label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
+                        annotator.box_label(xyxy, label, color=colors(c, True))
+                        # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3)
+                    if save_crop:
+                        save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
+
+            # Stream results
+            im0 = annotator.result()
+            if view_img:
+                if platform.system() == 'Linux' and p not in windows:
+                    windows.append(p)
+                    cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
+                    cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
+                cv2.imshow(str(p), im0)
+                if cv2.waitKey(1) == ord('q'):  # 1 millisecond
+                    exit()
+
+            # Save results (image with detections)
+            if save_img:
+                if dataset.mode == 'image':
+                    cv2.imwrite(save_path, im0)
+                else:  # 'video' or 'stream'
+                    if vid_path[i] != save_path:  # new video
+                        vid_path[i] = save_path
+                        if isinstance(vid_writer[i], cv2.VideoWriter):
+                            vid_writer[i].release()  # release previous video writer
+                        if vid_cap:  # video
+                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
+                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+                        else:  # stream
+                            fps, w, h = 30, im0.shape[1], im0.shape[0]
+                        save_path = str(Path(save_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
+                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+                    vid_writer[i].write(im0)
+
+        # Print time (inference-only)
+        LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")
+
+    # Print results
+    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
+    LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
+    if save_txt or save_img:
+        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
+        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
+    if update:
+        strip_optimizer(weights[0])  # update model (to fix SourceChangeWarning)
+
+
+def parse_opt():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--weights', nargs='+', type=str, default='yolov5s-seg.pt', help='model path(s)')
+    parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)')
+    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
+    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
+    parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
+    parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
+    parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--view-img', action='store_true', help='show results')
+    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
+    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
+    parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
+    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
+    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
+    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
+    parser.add_argument('--augment', action='store_true', help='augmented inference')
+    parser.add_argument('--visualize', action='store_true', help='visualize features')
+    parser.add_argument('--update', action='store_true', help='update all models')
+    parser.add_argument('--project', default=ROOT / 'runs/predict-seg', help='save results to project/name')
+    parser.add_argument('--name', default='exp', help='save results to project/name')
+    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+    parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
+    parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
+    parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
+    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
+    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
+    parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
+    parser.add_argument('--retina-masks', action='store_true', help='whether to plot masks in native resolution')
+    opt = parser.parse_args()
+    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
+    print_args(vars(opt))
+    return opt
+
+
+def main(opt):
+    check_requirements(exclude=('tensorboard', 'thop'))
+    run(**vars(opt))
+
+
+if __name__ == '__main__':
+    opt = parse_opt()
+    main(opt)
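Note: because `run()` above exposes every CLI flag as a keyword argument, the segmentation predictor can also be driven programmatically. A minimal usage sketch (not part of the commit; the weights and source paths are placeholders):

from infer.yolov5.segment.predict import run

run(weights='yolov5s-seg.pt',   # placeholder weights path
    source='data/images',       # placeholder image directory
    imgsz=640,                  # an int is expanded to [640, 640] inside run()
    conf_thres=0.25,
    iou_thres=0.45,
    save_txt=True)              # also write per-image label files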