nakamura196 committed
Commit 1135767
Parent: 0f967fc
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete changeset.
Files changed (50)
  1. .gitattributes +0 -31
  2. .gitignore +0 -4
  3. README.md +0 -12
  4. app.py +0 -48
  5. best.pt +0 -3
  6. requirements.txt +0 -33
  7. ultralytics/yolov5/.dockerignore +0 -222
  8. ultralytics/yolov5/.gitattributes +0 -2
  9. ultralytics/yolov5/.gitignore +0 -256
  10. ultralytics/yolov5/.pre-commit-config.yaml +0 -66
  11. ultralytics/yolov5/CONTRIBUTING.md +0 -94
  12. ultralytics/yolov5/Dockerfile +0 -65
  13. ultralytics/yolov5/LICENSE +0 -674
  14. ultralytics/yolov5/README.md +0 -304
  15. ultralytics/yolov5/data/Argoverse.yaml +0 -67
  16. ultralytics/yolov5/data/GlobalWheat2020.yaml +0 -54
  17. ultralytics/yolov5/data/Objects365.yaml +0 -113
  18. ultralytics/yolov5/data/SKU-110K.yaml +0 -53
  19. ultralytics/yolov5/data/VOC.yaml +0 -80
  20. ultralytics/yolov5/data/VisDrone.yaml +0 -61
  21. ultralytics/yolov5/data/coco.yaml +0 -45
  22. ultralytics/yolov5/data/coco128.yaml +0 -30
  23. ultralytics/yolov5/data/hyps/hyp.Objects365.yaml +0 -34
  24. ultralytics/yolov5/data/hyps/hyp.VOC.yaml +0 -40
  25. ultralytics/yolov5/data/hyps/hyp.scratch-high.yaml +0 -34
  26. ultralytics/yolov5/data/hyps/hyp.scratch-low.yaml +0 -34
  27. ultralytics/yolov5/data/hyps/hyp.scratch-med.yaml +0 -34
  28. ultralytics/yolov5/data/images/bus.jpg +0 -0
  29. ultralytics/yolov5/data/images/zidane.jpg +0 -0
  30. ultralytics/yolov5/data/scripts/download_weights.sh +0 -20
  31. ultralytics/yolov5/data/scripts/get_coco.sh +0 -27
  32. ultralytics/yolov5/data/scripts/get_coco128.sh +0 -17
  33. ultralytics/yolov5/data/xView.yaml +0 -102
  34. ultralytics/yolov5/detect.py +0 -252
  35. ultralytics/yolov5/export.py +0 -559
  36. ultralytics/yolov5/hubconf.py +0 -143
  37. ultralytics/yolov5/models/__init__.py +0 -0
  38. ultralytics/yolov5/models/common.py +0 -684
  39. ultralytics/yolov5/models/experimental.py +0 -121
  40. ultralytics/yolov5/models/hub/anchors.yaml +0 -59
  41. ultralytics/yolov5/models/hub/yolov3-spp.yaml +0 -51
  42. ultralytics/yolov5/models/hub/yolov3-tiny.yaml +0 -41
  43. ultralytics/yolov5/models/hub/yolov3.yaml +0 -51
  44. ultralytics/yolov5/models/hub/yolov5-bifpn.yaml +0 -48
  45. ultralytics/yolov5/models/hub/yolov5-fpn.yaml +0 -42
  46. ultralytics/yolov5/models/hub/yolov5-p2.yaml +0 -54
  47. ultralytics/yolov5/models/hub/yolov5-p34.yaml +0 -41
  48. ultralytics/yolov5/models/hub/yolov5-p6.yaml +0 -56
  49. ultralytics/yolov5/models/hub/yolov5-p7.yaml +0 -67
  50. ultralytics/yolov5/models/hub/yolov5-panet.yaml +0 -48
.gitattributes DELETED
@@ -1,31 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zstandard filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore DELETED
@@ -1,4 +0,0 @@
- .DS_Store
- yolov5s.pt
- __pycache__
- gradio_queue.db
README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Yolov5 Char
- emoji: 🌍
- colorFrom: yellow
- colorTo: purple
- sdk: gradio
- sdk_version: 3.1.4
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py DELETED
@@ -1,48 +0,0 @@
- import gradio as gr
- import torch
- from PIL import Image
- import json
- # import gdown
-
- '''
- # a file
- url = "https://drive.google.com/uc?id=1-ZIa4KsSjhup4Pep70uBvI4BjnSUbocX"
- output = "best.pt"
- gdown.download(url, output, quiet=False)
- '''
-
- # Images
- torch.hub.download_url_to_file(
-     'https://iiif.dl.itc.u-tokyo.ac.jp/iiif/genji/TIFF/A00_6587/01/01_0004.tif/full/1024,/0/default.jpg', '『源氏物語』(東京大学総合図書館所蔵).jpg')
- torch.hub.download_url_to_file(
-     'https://rmda.kulib.kyoto-u.ac.jp/iiif/RB00007030/01/RB00007030_00003_0.ptif/full/1024,/0/default.jpg', '『源氏物語』(京都大学所蔵).jpg')
- torch.hub.download_url_to_file(
-     'https://kotenseki.nijl.ac.jp/api/iiif/100312034/v4/HRSM/HRSM-00396/HRSM-00396-00012.tif/full/1024,/0/default.jpg', '『平家物語』(国文学研究資料館提供).jpg')
-
- # Model
- # model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # force_reload=True to update
- model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt', source="local")
-
- def yolo(im, size=1024):
-     g = (size / max(im.size)) # gain
-     im = im.resize((int(x * g) for x in im.size), Image.ANTIALIAS) # resize
-
-     results = model(im) # inference
-     results.render() # updates results.imgs with boxes and labels
-
-     df = results.pandas().xyxy[0].to_json(orient="records")
-     res = json.loads(df)
-
-     return [Image.fromarray(results.imgs[0]), res]
-
-
- inputs = gr.inputs.Image(type='pil', label="Original Image")
- outputs = [gr.outputs.Image(type="pil", label="Output Image"),
-            gr.outputs.JSON(label="Output JSON")]
-
- title = "YOLOv5 Character"
- description = "YOLOv5 Character Gradio demo for object detection. Upload an image or click an example image to use."
- article = "<p style='text-align: center'>YOLOv5 Character is an object detection model trained on the <a href=\"http://codh.rois.ac.jp/char-shape/\">日本古典籍くずし字データセット</a>.</p>"
-
- examples = [['『源氏物語』(東京大学総合図書館所蔵).jpg'], ['『源氏物語』(京都大学所蔵).jpg'], ['『平家物語』(国文学研究資料館提供).jpg']]
- gr.Interface(yolo, inputs, outputs, title=title, description=description, article=article, examples=examples, theme="huggingface").launch(enable_queue=True) # cache_examples=True,
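
Note that the deleted app.py above targets Gradio 3.1-era and pre-Pillow-10 APIs: `gr.inputs.*`/`gr.outputs.*`, `Image.ANTIALIAS`, and `results.imgs` have all since been renamed or removed upstream. For reference only, here is a minimal sketch of the same inference flow against newer releases; the substitutions it makes (`Image.Resampling.LANCZOS`, `results.ims`, top-level `gr.Image`/`gr.JSON`) are assumptions about current Pillow, YOLOv5 hub, and Gradio versions, not part of this commit.

```python
# A sketch only, NOT part of this commit: the same demo against newer
# Pillow / YOLOv5-hub / Gradio releases. The renamed symbols used here
# (Image.Resampling.LANCZOS, results.ims, top-level gr.Image / gr.JSON)
# are assumptions about current versions, not the ones this Space pinned.
import json

import gradio as gr
import torch
from PIL import Image

# Assumes best.pt and a local ultralytics/yolov5 checkout, as in the deleted app.py.
model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt', source='local')


def yolo(im, size=1024):
    g = size / max(im.size)  # gain so the long edge becomes `size`
    im = im.resize(tuple(int(x * g) for x in im.size), Image.Resampling.LANCZOS)

    results = model(im)  # inference
    results.render()     # draw boxes and labels onto the result images

    detections = json.loads(results.pandas().xyxy[0].to_json(orient='records'))
    rendered = getattr(results, 'ims', getattr(results, 'imgs', None))[0]  # attribute renamed upstream
    return Image.fromarray(rendered), detections


demo = gr.Interface(
    yolo,
    inputs=gr.Image(type='pil', label='Original Image'),
    outputs=[gr.Image(type='pil', label='Output Image'), gr.JSON(label='Output JSON')],
    title='YOLOv5 Character',
)
demo.launch()
```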
best.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:772f81467ecfbf27a5c9e2b5b1b8b783b94c1db6d56b41b4e7c83996365afc8e
- size 691395070
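
`best.pt` is tracked with Git LFS (per the `*.pt` rule in the deleted `.gitattributes` above), so the diff records only the three-line pointer file: spec version, SHA-256 object id, and byte size (about 691 MB), not the weights themselves. A small sketch, assuming the real `best.pt` has been fetched locally, that verifies a download against the pointer's recorded oid and size:

```python
# Hypothetical integrity check (not part of the commit): verify a locally
# fetched best.pt against the oid/size recorded in the LFS pointer above.
import hashlib
from pathlib import Path

EXPECTED_OID = "772f81467ecfbf27a5c9e2b5b1b8b783b94c1db6d56b41b4e7c83996365afc8e"
EXPECTED_SIZE = 691395070

path = Path("best.pt")
assert path.stat().st_size == EXPECTED_SIZE, "size mismatch: pointer file or partial download?"

h = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("best.pt matches the LFS pointer")
```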
requirements.txt DELETED
@@ -1,33 +0,0 @@
- # pip install -r requirements.txt
-
- # base ----------------------------------------
- matplotlib>=3.2.2
- numpy>=1.18.5
- opencv-python-headless
- Pillow
- PyYAML>=5.3.1
- scipy>=1.4.1
- torch>=1.7.0
- torchvision>=0.8.1
- tqdm>=4.41.0
-
- # logging -------------------------------------
- tensorboard>=2.4.1
- # wandb
-
- # plotting ------------------------------------
- seaborn>=0.11.0
- pandas
-
- # export --------------------------------------
- # coremltools>=4.1
- # onnx>=1.9.0
- # scikit-learn==0.19.2 # for coreml quantization
-
- # extras --------------------------------------
- # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172
- # pycocotools>=2.0 # COCO mAP
- # albumentations>=1.0.3
- thop # FLOPs computation
-
- gdown
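
The version floors above are the load-bearing part of this file (for example `torch>=1.7.0` and `torchvision>=0.8.1` for the model code). A small sketch, not part of the commit, that checks an installed environment against those floors; the naive version parser is an assumption that only handles plain `X.Y.Z` strings:

```python
# A sketch only, not part of the commit: check an installed environment
# against the version floors from the deleted requirements.txt.
from importlib.metadata import PackageNotFoundError, version

FLOORS = {
    "matplotlib": "3.2.2", "numpy": "1.18.5", "PyYAML": "5.3.1",
    "scipy": "1.4.1", "torch": "1.7.0", "torchvision": "0.8.1",
    "tqdm": "4.41.0", "tensorboard": "2.4.1", "seaborn": "0.11.0",
}


def parse(v):
    # keep the leading numeric parts; drop local tags like "+cu113"
    return tuple(int(p) for p in v.split("+")[0].split(".") if p.isdigit())


for pkg, floor in FLOORS.items():
    try:
        have = version(pkg)
    except PackageNotFoundError:
        print(f"{pkg}: MISSING (needs >= {floor})")
        continue
    status = "ok" if parse(have) >= parse(floor) else f"BELOW floor {floor}"
    print(f"{pkg}: {have} ({status})")
```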
ultralytics/yolov5/.dockerignore DELETED
@@ -1,222 +0,0 @@
- # Repo-specific DockerIgnore -------------------------------------------------------------------------------------------
- #.git
- .cache
- .idea
- runs
- output
- coco
- storage.googleapis.com
-
- data/samples/*
- **/results*.csv
- *.jpg
-
- # Neural Network weights -----------------------------------------------------------------------------------------------
- **/*.pt
- **/*.pth
- **/*.onnx
- **/*.engine
- **/*.mlmodel
- **/*.torchscript
- **/*.torchscript.pt
- **/*.tflite
- **/*.h5
- **/*.pb
- *_saved_model/
- *_web_model/
- *_openvino_model/
-
- # Below Copied From .gitignore -----------------------------------------------------------------------------------------
- # Below Copied From .gitignore -----------------------------------------------------------------------------------------
-
-
- # GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
- # Byte-compiled / optimized / DLL files
- __pycache__/
- *.py[cod]
- *$py.class
-
- # C extensions
- *.so
-
- # Distribution / packaging
- .Python
- env/
- build/
- develop-eggs/
- dist/
- downloads/
- eggs/
- .eggs/
- lib/
- lib64/
- parts/
- sdist/
- var/
- wheels/
- *.egg-info/
- wandb/
- .installed.cfg
- *.egg
-
- # PyInstaller
- # Usually these files are written by a python script from a template
- # before PyInstaller builds the exe, so as to inject date/other infos into it.
- *.manifest
- *.spec
-
- # Installer logs
- pip-log.txt
- pip-delete-this-directory.txt
-
- # Unit test / coverage reports
- htmlcov/
- .tox/
- .coverage
- .coverage.*
- .cache
- nosetests.xml
- coverage.xml
- *.cover
- .hypothesis/
-
- # Translations
- *.mo
- *.pot
-
- # Django stuff:
- *.log
- local_settings.py
-
- # Flask stuff:
- instance/
- .webassets-cache
-
- # Scrapy stuff:
- .scrapy
-
- # Sphinx documentation
- docs/_build/
-
- # PyBuilder
- target/
-
- # Jupyter Notebook
- .ipynb_checkpoints
-
- # pyenv
- .python-version
-
- # celery beat schedule file
- celerybeat-schedule
-
- # SageMath parsed files
- *.sage.py
-
- # dotenv
- .env
-
- # virtualenv
- .venv*
- venv*/
- ENV*/
-
- # Spyder project settings
- .spyderproject
- .spyproject
-
- # Rope project settings
- .ropeproject
-
- # mkdocs documentation
- /site
-
- # mypy
- .mypy_cache/
-
-
- # https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------
-
- # General
- .DS_Store
- .AppleDouble
- .LSOverride
-
- # Icon must end with two \r
- Icon
- Icon?
-
- # Thumbnails
- ._*
-
- # Files that might appear in the root of a volume
- .DocumentRevisions-V100
- .fseventsd
- .Spotlight-V100
- .TemporaryItems
- .Trashes
- .VolumeIcon.icns
- .com.apple.timemachine.donotpresent
-
- # Directories potentially created on remote AFP share
- .AppleDB
- .AppleDesktop
- Network Trash Folder
- Temporary Items
- .apdisk
-
-
- # https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
- # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
- # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
-
- # User-specific stuff:
- .idea/*
- .idea/**/workspace.xml
- .idea/**/tasks.xml
- .idea/dictionaries
- .html # Bokeh Plots
- .pg # TensorFlow Frozen Graphs
- .avi # videos
-
- # Sensitive or high-churn files:
- .idea/**/dataSources/
- .idea/**/dataSources.ids
- .idea/**/dataSources.local.xml
- .idea/**/sqlDataSources.xml
- .idea/**/dynamic.xml
- .idea/**/uiDesigner.xml
-
- # Gradle:
- .idea/**/gradle.xml
- .idea/**/libraries
-
- # CMake
- cmake-build-debug/
- cmake-build-release/
-
- # Mongo Explorer plugin:
- .idea/**/mongoSettings.xml
-
- ## File-based project format:
- *.iws
-
- ## Plugin-specific files:
-
- # IntelliJ
- out/
-
- # mpeltonen/sbt-idea plugin
- .idea_modules/
-
- # JIRA plugin
- atlassian-ide-plugin.xml
-
- # Cursive Clojure plugin
- .idea/replstate.xml
-
- # Crashlytics plugin (for Android Studio and IntelliJ)
- com_crashlytics_export_strings.xml
- crashlytics.properties
- crashlytics-build.properties
- fabric.properties
ultralytics/yolov5/.gitattributes DELETED
@@ -1,2 +0,0 @@
- # this drop notebooks from GitHub language stats
- *.ipynb linguist-vendored
ultralytics/yolov5/.gitignore DELETED
@@ -1,256 +0,0 @@
- # Repo-specific GitIgnore ----------------------------------------------------------------------------------------------
- *.jpg
- *.jpeg
- *.png
- *.bmp
- *.tif
- *.tiff
- *.heic
- *.JPG
- *.JPEG
- *.PNG
- *.BMP
- *.TIF
- *.TIFF
- *.HEIC
- *.mp4
- *.mov
- *.MOV
- *.avi
- *.data
- *.json
- *.cfg
- !setup.cfg
- !cfg/yolov3*.cfg
-
- storage.googleapis.com
- runs/*
- data/*
- data/images/*
- !data/*.yaml
- !data/hyps
- !data/scripts
- !data/images
- !data/images/zidane.jpg
- !data/images/bus.jpg
- !data/*.sh
-
- results*.csv
-
- # Datasets -------------------------------------------------------------------------------------------------------------
- coco/
- coco128/
- VOC/
-
- # MATLAB GitIgnore -----------------------------------------------------------------------------------------------------
- *.m~
- *.mat
- !targets*.mat
-
- # Neural Network weights -----------------------------------------------------------------------------------------------
- *.weights
- *.pt
- *.pb
- *.onnx
- *.engine
- *.mlmodel
- *.torchscript
- *.tflite
- *.h5
- *_saved_model/
- *_web_model/
- *_openvino_model/
- darknet53.conv.74
- yolov3-tiny.conv.15
-
- # GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
- # Byte-compiled / optimized / DLL files
- __pycache__/
- *.py[cod]
- *$py.class
-
- # C extensions
- *.so
-
- # Distribution / packaging
- .Python
- env/
- build/
- develop-eggs/
- dist/
- downloads/
- eggs/
- .eggs/
- lib/
- lib64/
- parts/
- sdist/
- var/
- wheels/
- *.egg-info/
- /wandb/
- .installed.cfg
- *.egg
-
-
- # PyInstaller
- # Usually these files are written by a python script from a template
- # before PyInstaller builds the exe, so as to inject date/other infos into it.
- *.manifest
- *.spec
-
- # Installer logs
- pip-log.txt
- pip-delete-this-directory.txt
-
- # Unit test / coverage reports
- htmlcov/
- .tox/
- .coverage
- .coverage.*
- .cache
- nosetests.xml
- coverage.xml
- *.cover
- .hypothesis/
-
- # Translations
- *.mo
- *.pot
-
- # Django stuff:
- *.log
- local_settings.py
-
- # Flask stuff:
- instance/
- .webassets-cache
-
- # Scrapy stuff:
- .scrapy
-
- # Sphinx documentation
- docs/_build/
-
- # PyBuilder
- target/
-
- # Jupyter Notebook
- .ipynb_checkpoints
-
- # pyenv
- .python-version
-
- # celery beat schedule file
- celerybeat-schedule
-
- # SageMath parsed files
- *.sage.py
-
- # dotenv
- .env
-
- # virtualenv
- .venv*
- venv*/
- ENV*/
-
- # Spyder project settings
- .spyderproject
- .spyproject
-
- # Rope project settings
- .ropeproject
-
- # mkdocs documentation
- /site
-
- # mypy
- .mypy_cache/
-
-
- # https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------
-
- # General
- .DS_Store
- .AppleDouble
- .LSOverride
-
- # Icon must end with two \r
- Icon
- Icon?
-
- # Thumbnails
- ._*
-
- # Files that might appear in the root of a volume
- .DocumentRevisions-V100
- .fseventsd
- .Spotlight-V100
- .TemporaryItems
- .Trashes
- .VolumeIcon.icns
- .com.apple.timemachine.donotpresent
-
- # Directories potentially created on remote AFP share
- .AppleDB
- .AppleDesktop
- Network Trash Folder
- Temporary Items
- .apdisk
-
-
- # https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
- # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
- # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
-
- # User-specific stuff:
- .idea/*
- .idea/**/workspace.xml
- .idea/**/tasks.xml
- .idea/dictionaries
- .html # Bokeh Plots
- .pg # TensorFlow Frozen Graphs
- .avi # videos
-
- # Sensitive or high-churn files:
- .idea/**/dataSources/
- .idea/**/dataSources.ids
- .idea/**/dataSources.local.xml
- .idea/**/sqlDataSources.xml
- .idea/**/dynamic.xml
- .idea/**/uiDesigner.xml
-
- # Gradle:
- .idea/**/gradle.xml
- .idea/**/libraries
-
- # CMake
- cmake-build-debug/
- cmake-build-release/
-
- # Mongo Explorer plugin:
- .idea/**/mongoSettings.xml
-
- ## File-based project format:
- *.iws
-
- ## Plugin-specific files:
-
- # IntelliJ
- out/
-
- # mpeltonen/sbt-idea plugin
- .idea_modules/
-
- # JIRA plugin
- atlassian-ide-plugin.xml
-
- # Cursive Clojure plugin
- .idea/replstate.xml
-
- # Crashlytics plugin (for Android Studio and IntelliJ)
- com_crashlytics_export_strings.xml
- crashlytics.properties
- crashlytics-build.properties
- fabric.properties
ultralytics/yolov5/.pre-commit-config.yaml DELETED
@@ -1,66 +0,0 @@
- # Define hooks for code formations
- # Will be applied on any updated commit files if a user has installed and linked commit hook
-
- default_language_version:
-   python: python3.8
-
- # Define bot property if installed via https://github.com/marketplace/pre-commit-ci
- ci:
-   autofix_prs: true
-   autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
-   autoupdate_schedule: quarterly
-   # submodules: true
-
- repos:
-   - repo: https://github.com/pre-commit/pre-commit-hooks
-     rev: v4.1.0
-     hooks:
-       - id: end-of-file-fixer
-       - id: trailing-whitespace
-       - id: check-case-conflict
-       - id: check-yaml
-       - id: check-toml
-       - id: pretty-format-json
-       - id: check-docstring-first
-
-   - repo: https://github.com/asottile/pyupgrade
-     rev: v2.31.0
-     hooks:
-       - id: pyupgrade
-         args: [--py36-plus]
-         name: Upgrade code
-
-   - repo: https://github.com/PyCQA/isort
-     rev: 5.10.1
-     hooks:
-       - id: isort
-         name: Sort imports
-
-   # TODO
-   #- repo: https://github.com/pre-commit/mirrors-yapf
-   #  rev: v0.31.0
-   #  hooks:
-   #    - id: yapf
-   #      name: formatting
-
-   # TODO
-   #- repo: https://github.com/executablebooks/mdformat
-   #  rev: 0.7.7
-   #  hooks:
-   #    - id: mdformat
-   #      additional_dependencies:
-   #        - mdformat-gfm
-   #        - mdformat-black
-   #        - mdformat_frontmatter
-
-   # TODO
-   #- repo: https://github.com/asottile/yesqa
-   #  rev: v1.2.3
-   #  hooks:
-   #    - id: yesqa
-
-   - repo: https://github.com/PyCQA/flake8
-     rev: 4.0.1
-     hooks:
-       - id: flake8
-         name: PEP8
ultralytics/yolov5/CONTRIBUTING.md DELETED
@@ -1,94 +0,0 @@
- ## Contributing to YOLOv5 🚀
-
- We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible, whether it's:
-
- - Reporting a bug
- - Discussing the current state of the code
- - Submitting a fix
- - Proposing a new feature
- - Becoming a maintainer
-
- YOLOv5 works so well due to our combined community effort, and for every small improvement you contribute you will be
- helping push the frontiers of what's possible in AI 😃!
-
- ## Submitting a Pull Request (PR) 🛠️
-
- Submitting a PR is easy! This example shows how to submit a PR for updating `requirements.txt` in 4 steps:
-
- ### 1. Select File to Update
-
- Select `requirements.txt` to update by clicking on it in GitHub.
- <p align="center"><img width="800" alt="PR_step1" src="https://user-images.githubusercontent.com/26833433/122260847-08be2600-ced4-11eb-828b-8287ace4136c.png"></p>
-
- ### 2. Click 'Edit this file'
-
- Button is in top-right corner.
- <p align="center"><img width="800" alt="PR_step2" src="https://user-images.githubusercontent.com/26833433/122260844-06f46280-ced4-11eb-9eec-b8a24be519ca.png"></p>
-
- ### 3. Make Changes
-
- Change `matplotlib` version from `3.2.2` to `3.3`.
- <p align="center"><img width="800" alt="PR_step3" src="https://user-images.githubusercontent.com/26833433/122260853-0a87e980-ced4-11eb-9fd2-3650fb6e0842.png"></p>
-
- ### 4. Preview Changes and Submit PR
-
- Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch**
- for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose
- changes** button. All done, your PR is now submitted to YOLOv5 for review and approval 😃!
- <p align="center"><img width="800" alt="PR_step4" src="https://user-images.githubusercontent.com/26833433/122260856-0b208000-ced4-11eb-8e8e-77b6151cbcc3.png"></p>
-
- ### PR recommendations
-
- To allow your work to be integrated as seamlessly as possible, we advise you to:
-
- - ✅ Verify your PR is **up-to-date with upstream/master.** If your PR is behind upstream/master an
-   automatic [GitHub Actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) merge may
-   be attempted by writing /rebase in a new comment, or by running the following code, replacing 'feature' with the name
-   of your local branch:
-
-   ```bash
-   git remote add upstream https://github.com/ultralytics/yolov5.git
-   git fetch upstream
-   # git checkout feature # <--- replace 'feature' with local branch name
-   git merge upstream/master
-   git push -u origin -f
-   ```
-
- - ✅ Verify all Continuous Integration (CI) **checks are passing**.
- - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase
-   but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee
-
- ## Submitting a Bug Report 🐛
-
- If you spot a problem with YOLOv5 please submit a Bug Report!
-
- For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few
- short guidelines below to help users provide what we need in order to get started.
-
- When asking a question, people will be better able to provide help if you provide **code** that they can easily
- understand and use to **reproduce** the problem. This is referred to by community members as creating
- a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces
- the problem should be:
-
- * ✅ **Minimal** – Use as little code as possible that still produces the same problem
- * ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself
- * ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem
-
- In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code
- should be:
-
- * ✅ **Current** – Verify that your code is up-to-date with current
-   GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new
-   copy to ensure your problem has not already been resolved by previous commits.
- * ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this
-   repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️.
-
- If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the
- 🐛 **Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and providing
- a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better
- understand and diagnose your problem.
-
- ## License
-
- By contributing, you agree that your contributions will be licensed under
- the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/)
ultralytics/yolov5/Dockerfile DELETED
@@ -1,65 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-
- # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch
- FROM nvcr.io/nvidia/pytorch:21.10-py3
-
- # Install linux packages
- RUN apt update && apt install -y zip htop screen libgl1-mesa-glx
-
- # Install python dependencies
- COPY requirements.txt .
- RUN python -m pip install --upgrade pip
- RUN pip uninstall -y torch torchvision torchtext
- RUN pip install --no-cache -r requirements.txt albumentations wandb gsutil notebook \
-     torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
- # RUN pip install --no-cache -U torch torchvision
-
- # Create working directory
- RUN mkdir -p /usr/src/app
- WORKDIR /usr/src/app
-
- # Copy contents
- RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
- # COPY . /usr/src/app
-
- # Downloads to user config dir
- ADD https://ultralytics.com/assets/Arial.ttf /root/.config/Ultralytics/
-
- # Set environment variables
- # ENV HOME=/usr/src/app
-
-
- # Usage Examples -------------------------------------------------------------------------------------------------------
-
- # Build and Push
- # t=ultralytics/yolov5:latest && sudo docker build -t $t . && sudo docker push $t
-
- # Pull and Run
- # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t
-
- # Pull and Run with local directory access
- # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t
-
- # Kill all
- # sudo docker kill $(sudo docker ps -q)
-
- # Kill all image-based
- # sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest)
-
- # Bash into running container
- # sudo docker exec -it 5a9b5863d93d bash
-
- # Bash into stopped container
- # id=$(sudo docker ps -qa) && sudo docker start $id && sudo docker exec -it $id bash
-
- # Clean up
- # docker system prune -a --volumes
-
- # Update Ubuntu drivers
- # https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/
-
- # DDP test
- # python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3
-
- # GCP VM from Image
- # docker.io/ultralytics/yolov5:latest
ultralytics/yolov5/LICENSE DELETED
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
- software and other kinds of works.
-
- The licenses for most software and other practical works are designed
- to take away your freedom to share and change the works. By contrast,
- the GNU General Public License is intended to guarantee your freedom to
- share and change all versions of a program--to make sure it remains free
- software for all its users. We, the Free Software Foundation, use the
- GNU General Public License for most of our software; it applies also to
- any other work released this way by its authors. You can apply it to
- your programs, too.
-
- When we speak of free software, we are referring to freedom, not
- price. Our General Public Licenses are designed to make sure that you
- have the freedom to distribute copies of free software (and charge for
- them if you wish), that you receive source code or can get it if you
- want it, that you can change the software or use pieces of it in new
- free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
- these rights or asking you to surrender the rights. Therefore, you have
- certain responsibilities if you distribute copies of the software, or if
- you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
- gratis or for a fee, you must pass on to the recipients the same
- freedoms that you received. You must make sure that they, too, receive
- or can get the source code. And you must show them these terms so they
- know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
- (1) assert copyright on the software, and (2) offer you this License
- giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
- that there is no warranty for this free software. For both users' and
- authors' sake, the GPL requires that modified versions be marked as
- changed, so that their problems will not be attributed erroneously to
- authors of previous versions.
-
- Some devices are designed to deny users access to install or run
- modified versions of the software inside them, although the manufacturer
- can do so. This is fundamentally incompatible with the aim of
- protecting users' freedom to change the software. The systematic
- pattern of such abuse occurs in the area of products for individuals to
- use, which is precisely where it is most unacceptable. Therefore, we
- have designed this version of the GPL to prohibit the practice for those
- products. If such problems arise substantially in other domains, we
- stand ready to extend this provision to those domains in future versions
- of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
- States should not allow patents to restrict development and use of
- software on general-purpose computers, but in those that do, we wish to
- avoid the special danger that patents applied to a free program could
- make it effectively proprietary. To prevent this, the GPL assures that
- patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
- modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
- works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
- License. Each licensee is addressed as "you". "Licensees" and
- "recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
- in a fashion requiring copyright permission, other than the making of an
- exact copy. The resulting work is called a "modified version" of the
- earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
- on the Program.
-
- To "propagate" a work means to do anything with it that, without
- permission, would make you directly or secondarily liable for
- infringement under applicable copyright law, except executing it on a
- computer or modifying a private copy. Propagation includes copying,
- distribution (with or without modification), making available to the
- public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
- parties to make or receive copies. Mere interaction with a user through
- a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
- to the extent that it includes a convenient and prominently visible
- feature that (1) displays an appropriate copyright notice, and (2)
- tells the user that there is no warranty for the work (except to the
- extent that warranties are provided), that licensees may convey the
- work under this License, and how to view a copy of this License. If
- the interface presents a list of user commands or options, such as a
- menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
- for making modifications to it. "Object code" means any non-source
- form of a work.
-
- A "Standard Interface" means an interface that either is an official
- standard defined by a recognized standards body, or, in the case of
- interfaces specified for a particular programming language, one that
- is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
- than the work as a whole, that (a) is included in the normal form of
- packaging a Major Component, but which is not part of that Major
- Component, and (b) serves only to enable use of the work with that
- Major Component, or to implement a Standard Interface for which an
- implementation is available to the public in source code form. A
- "Major Component", in this context, means a major essential component
- (kernel, window system, and so on) of the specific operating system
- (if any) on which the executable work runs, or a compiler used to
- produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
- the source code needed to generate, install, and (for an executable
- work) run the object code and to modify the work, including scripts to
- control those activities. However, it does not include the work's
- System Libraries, or general-purpose tools or generally available free
- programs which are used unmodified in performing those activities but
- which are not part of the work. For example, Corresponding Source
- includes interface definition files associated with source files for
- the work, and the source code for shared libraries and dynamically
- linked subprograms that the work is specifically designed to require,
- such as by intimate data communication or control flow between those
- subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
- can regenerate automatically from other parts of the Corresponding
- Source.
-
- The Corresponding Source for a work in source code form is that
- same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
- copyright on the Program, and are irrevocable provided the stated
- conditions are met. This License explicitly affirms your unlimited
- permission to run the unmodified Program. The output from running a
- covered work is covered by this License only if the output, given its
- content, constitutes a covered work. This License acknowledges your
- rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
- convey, without conditions so long as your license otherwise remains
- in force. You may convey covered works to others for the sole purpose
- of having them make modifications exclusively for you, or provide you
- with facilities for running those works, provided that you comply with
- the terms of this License in conveying all material for which you do
- not control copyright. Those thus making or running the covered works
- for you must do so exclusively on your behalf, under your direction
- and control, on terms that prohibit them from making any copies of
- your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
- the conditions stated below. Sublicensing is not allowed; section 10
- makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
- measure under any applicable law fulfilling obligations under article
- 11 of the WIPO copyright treaty adopted on 20 December 1996, or
- similar laws prohibiting or restricting circumvention of such
- measures.
-
- When you convey a covered work, you waive any legal power to forbid
- circumvention of technological measures to the extent such circumvention
- is effected by exercising rights under this License with respect to
- the covered work, and you disclaim any intention to limit operation or
- modification of the work as a means of enforcing, against the work's
- users, your or third parties' legal rights to forbid circumvention of
- technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
- receive it, in any medium, provided that you conspicuously and
- appropriately publish on each copy an appropriate copyright notice;
- keep intact all notices stating that this License and any
- non-permissive terms added in accord with section 7 apply to the code;
- keep intact all notices of the absence of any warranty; and give all
- recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
- and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
- produce it from the Program, in the form of source code under the
- terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
- works, which are not by their nature extensions of the covered work,
- and which are not combined with it such as to form a larger program,
- in or on a volume of a storage or distribution medium, is called an
- "aggregate" if the compilation and its resulting copyright are not
- used to limit the access or legal rights of the compilation's users
- beyond what the individual works permit. Inclusion of a covered work
- in an aggregate does not cause this License to apply to the other
- parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
- of sections 4 and 5, provided that you also convey the
- machine-readable Corresponding Source under the terms of this License,
- in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
- from the Corresponding Source as a System Library, need not be
- included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
- tangible personal property which is normally used for personal, family,
- or household purposes, or (2) anything designed or sold for incorporation
- into a dwelling. In determining whether a product is a consumer product,
- doubtful cases shall be resolved in favor of coverage. For a particular
- product received by a particular user, "normally used" refers to a
- typical or common use of that class of product, regardless of the status
- of the particular user or of the way in which the particular user
- actually uses, or expects or is expected to use, the product. A product
- is a consumer product regardless of whether the product has substantial
- commercial, industrial or non-consumer uses, unless such uses represent
- the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
- procedures, authorization keys, or other information required to install
- and execute modified versions of a covered work in that User Product from
- a modified version of its Corresponding Source. The information must
- suffice to ensure that the continued functioning of the modified object
- code is in no case prevented or interfered with solely because
- modification has been made.
-
- If you convey an object code work under this section in, or with, or
- specifically for use in, a User Product, and the conveying occurs as
- part of a transaction in which the right of possession and use of the
- User Product is transferred to the recipient in perpetuity or for a
- fixed term (regardless of how the transaction is characterized), the
- Corresponding Source conveyed under this section must be accompanied
- by the Installation Information. But this requirement does not apply
- if neither you nor any third party retains the ability to install
- modified object code on the User Product (for example, the work has
- been installed in ROM).
-
- The requirement to provide Installation Information does not include a
- requirement to continue to provide support service, warranty, or updates
- for a work that has been modified or installed by the recipient, or for
- the User Product in which it has been modified or installed. Access to a
- network may be denied when the modification itself materially and
- adversely affects the operation of the network or violates the rules and
- protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
- in accord with this section must be in a format that is publicly
- documented (and with an implementation available to the public in
- source code form), and must require no special password or key for
- unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
- License by making exceptions from one or more of its conditions.
- Additional permissions that are applicable to the entire Program shall
- be treated as though they were included in this License, to the extent
- that they are valid under applicable law. If additional permissions
- apply only to part of the Program, that part may be used separately
- under those permissions, but the entire Program remains governed by
- this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
- remove any additional permissions from that copy, or from any part of
- it. (Additional permissions may be written to require their own
- removal in certain cases when you modify the work.) You may place
- additional permissions on material, added by you to a covered work,
- for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
- add to a covered work, you may (if authorized by the copyright holders of
- that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
- restrictions" within the meaning of section 10. If the Program as you
- received it, or any part of it, contains a notice stating that it is
- governed by this License along with a term that is a further
- restriction, you may remove that term. If a license document contains
- a further restriction but permits relicensing or conveying under this
- License, you may add to a covered work material governed by the terms
- of that license document, provided that the further restriction does
- not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
- must place, in the relevant source files, a statement of the
- additional terms that apply to those files, or a notice indicating
- where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
- form of a separately written license, or stated as exceptions;
- the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
- provided under this License. Any attempt otherwise to propagate or
- modify it is void, and will automatically terminate your rights under
- this License (including any patent licenses granted under the third
- paragraph of section 11).
-
- However, if you cease all violation of this License, then your
- license from a particular copyright holder is reinstated (a)
- provisionally, unless and until the copyright holder explicitly and
- finally terminates your license, and (b) permanently, if the copyright
- holder fails to notify you of the violation by some reasonable means
- prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
- reinstated permanently if the copyright holder notifies you of the
- violation by some reasonable means, this is the first time you have
- received notice of violation of this License (for any work) from that
- copyright holder, and you cure the violation prior to 30 days after
- your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
- licenses of parties who have received copies or rights from you under
- this License. If your rights have been terminated and not permanently
- reinstated, you do not qualify to receive new licenses for the same
- material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
- run a copy of the Program. Ancillary propagation of a covered work
- occurring solely as a consequence of using peer-to-peer transmission
- to receive a copy likewise does not require acceptance. However,
- nothing other than this License grants you permission to propagate or
- modify any covered work. These actions infringe copyright if you do
- not accept this License. Therefore, by modifying or propagating a
- covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
- receives a license from the original licensors, to run, modify and
- propagate that work, subject to this License. You are not responsible
- for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
- organization, or substantially all assets of one, or subdividing an
- organization, or merging organizations. If propagation of a covered
- work results from an entity transaction, each party to that
- transaction who receives a copy of the work also receives whatever
- licenses to the work the party's predecessor in interest had or could
- give under the previous paragraph, plus a right to possession of the
- Corresponding Source of the work from the predecessor in interest, if
- the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
- rights granted or affirmed under this License. For example, you may
- not impose a license fee, royalty, or other charge for exercise of
- rights granted under this License, and you may not initiate litigation
- (including a cross-claim or counterclaim in a lawsuit) alleging that
- any patent claim is infringed by making, using, selling, offering for
- sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
- License of the Program or a work on which the Program is based. The
- work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
- owned or controlled by the contributor, whether already acquired or
- hereafter acquired, that would be infringed by some manner, permitted
- by this License, of making, using, or selling its contributor version,
- but do not include claims that would be infringed only as a
- consequence of further modification of the contributor version. For
- purposes of this definition, "control" includes the right to grant
- patent sublicenses in a manner consistent with the requirements of
- this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
- patent license under the contributor's essential patent claims, to
- make, use, sell, offer for sale, import and otherwise run, modify and
- propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
- agreement or commitment, however denominated, not to enforce a patent
- (such as an express permission to practice a patent or covenant not to
- sue for patent infringement). To "grant" such a patent license to a
- party means to make such an agreement or commitment not to enforce a
- patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
- and the Corresponding Source of the work is not available for anyone
- to copy, free of charge and under the terms of this License, through a
- publicly available network server or other readily accessible means,
- then you must either (1) cause the Corresponding Source to be so
- available, or (2) arrange to deprive yourself of the benefit of the
- patent license for this particular work, or (3) arrange, in a manner
- consistent with the requirements of this License, to extend the patent
- license to downstream recipients. "Knowingly relying" means you have
- actual knowledge that, but for the patent license, your conveying the
- covered work in a country, or your recipient's use of the covered work
- in a country, would infringe one or more identifiable patents in that
- country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
- arrangement, you convey, or propagate by procuring conveyance of, a
- covered work, and grant a patent license to some of the parties
- receiving the covered work authorizing them to use, propagate, modify
- or convey a specific copy of the covered work, then the patent license
- you grant is automatically extended to all recipients of the covered
- work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
- the scope of its coverage, prohibits the exercise of, or is
- conditioned on the non-exercise of one or more of the rights that are
- specifically granted under this License. You may not convey a covered
- work if you are a party to an arrangement with a third party that is
- in the business of distributing software, under which you make payment
- to the third party based on the extent of your activity of conveying
- the work, and under which the third party grants, to any of the
- parties who would receive the covered work from you, a discriminatory
- patent license (a) in connection with copies of the covered work
- conveyed by you (or copies made from those copies), or (b) primarily
- for and in connection with specific products or compilations that
- contain the covered work, unless you entered into that arrangement,
- or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
- any implied license or other defenses to infringement that may
- otherwise be available to you under applicable patent law.
539
-
540
- 12. No Surrender of Others' Freedom.
541
-
542
- If conditions are imposed on you (whether by court order, agreement or
543
- otherwise) that contradict the conditions of this License, they do not
544
- excuse you from the conditions of this License. If you cannot convey a
545
- covered work so as to satisfy simultaneously your obligations under this
546
- License and any other pertinent obligations, then as a consequence you may
547
- not convey it at all. For example, if you agree to terms that obligate you
548
- to collect a royalty for further conveying from those to whom you convey
549
- the Program, the only way you could satisfy both those terms and this
550
- License would be to refrain entirely from conveying the Program.
551
-
552
- 13. Use with the GNU Affero General Public License.
553
-
554
- Notwithstanding any other provision of this License, you have
555
- permission to link or combine any covered work with a work licensed
556
- under version 3 of the GNU Affero General Public License into a single
557
- combined work, and to convey the resulting work. The terms of this
558
- License will continue to apply to the part which is the covered work,
559
- but the special requirements of the GNU Affero General Public License,
560
- section 13, concerning interaction through a network will apply to the
561
- combination as such.
562
-
563
- 14. Revised Versions of this License.
564
-
565
- The Free Software Foundation may publish revised and/or new versions of
566
- the GNU General Public License from time to time. Such new versions will
567
- be similar in spirit to the present version, but may differ in detail to
568
- address new problems or concerns.
569
-
570
- Each version is given a distinguishing version number. If the
571
- Program specifies that a certain numbered version of the GNU General
572
- Public License "or any later version" applies to it, you have the
573
- option of following the terms and conditions either of that numbered
574
- version or of any later version published by the Free Software
575
- Foundation. If the Program does not specify a version number of the
576
- GNU General Public License, you may choose any version ever published
577
- by the Free Software Foundation.
578
-
579
- If the Program specifies that a proxy can decide which future
580
- versions of the GNU General Public License can be used, that proxy's
581
- public statement of acceptance of a version permanently authorizes you
582
- to choose that version for the Program.
583
-
584
- Later license versions may give you additional or different
585
- permissions. However, no additional obligations are imposed on any
586
- author or copyright holder as a result of your choosing to follow a
587
- later version.
588
-
589
- 15. Disclaimer of Warranty.
590
-
591
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592
- APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593
- HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594
- OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595
- THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596
- PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597
- IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598
- ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599
-
600
- 16. Limitation of Liability.
601
-
602
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603
- WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604
- THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605
- GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606
- USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607
- DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608
- PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609
- EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610
- SUCH DAMAGES.
611
-
612
- 17. Interpretation of Sections 15 and 16.
613
-
614
- If the disclaimer of warranty and limitation of liability provided
615
- above cannot be given local legal effect according to their terms,
616
- reviewing courts shall apply local law that most closely approximates
617
- an absolute waiver of all civil liability in connection with the
618
- Program, unless a warranty or assumption of liability accompanies a
619
- copy of the Program in return for a fee.
620
-
621
- END OF TERMS AND CONDITIONS
622
-
623
- How to Apply These Terms to Your New Programs
624
-
625
- If you develop a new program, and you want it to be of the greatest
626
- possible use to the public, the best way to achieve this is to make it
627
- free software which everyone can redistribute and change under these terms.
628
-
629
- To do so, attach the following notices to the program. It is safest
630
- to attach them to the start of each source file to most effectively
631
- state the exclusion of warranty; and each file should have at least
632
- the "copyright" line and a pointer to where the full notice is found.
633
-
634
- <one line to give the program's name and a brief idea of what it does.>
635
- Copyright (C) <year> <name of author>
636
-
637
- This program is free software: you can redistribute it and/or modify
638
- it under the terms of the GNU General Public License as published by
639
- the Free Software Foundation, either version 3 of the License, or
640
- (at your option) any later version.
641
-
642
- This program is distributed in the hope that it will be useful,
643
- but WITHOUT ANY WARRANTY; without even the implied warranty of
644
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645
- GNU General Public License for more details.
646
-
647
- You should have received a copy of the GNU General Public License
648
- along with this program. If not, see <http://www.gnu.org/licenses/>.
649
-
650
- Also add information on how to contact you by electronic and paper mail.
651
-
652
- If the program does terminal interaction, make it output a short
653
- notice like this when it starts in an interactive mode:
654
-
655
- <program> Copyright (C) <year> <name of author>
656
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657
- This is free software, and you are welcome to redistribute it
658
- under certain conditions; type `show c' for details.
659
-
660
- The hypothetical commands `show w' and `show c' should show the appropriate
661
- parts of the General Public License. Of course, your program's commands
662
- might be different; for a GUI interface, you would use an "about box".
663
-
664
- You should also get your employer (if you work as a programmer) or school,
665
- if any, to sign a "copyright disclaimer" for the program, if necessary.
666
- For more information on this, and how to apply and follow the GNU GPL, see
667
- <http://www.gnu.org/licenses/>.
668
-
669
- The GNU General Public License does not permit incorporating your program
670
- into proprietary programs. If your program is a subroutine library, you
671
- may consider it more useful to permit linking proprietary applications with
672
- the library. If this is what you want to do, use the GNU Lesser General
673
- Public License instead of this License. But first, please read
674
- <http://www.gnu.org/philosophy/why-not-lgpl.html>.
 
ultralytics/yolov5/README.md DELETED
@@ -1,304 +0,0 @@
- <div align="center">
- <p>
- <a align="left" href="https://ultralytics.com/yolov5" target="_blank">
- <img width="850" src="https://github.com/ultralytics/yolov5/releases/download/v1.0/splash.jpg"></a>
- </p>
- <br>
- <div>
- <a href="https://github.com/ultralytics/yolov5/actions"><img src="https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg" alt="CI CPU testing"></a>
- <a href="https://zenodo.org/badge/latestdoi/264818686"><img src="https://zenodo.org/badge/264818686.svg" alt="YOLOv5 Citation"></a>
- <a href="https://hub.docker.com/r/ultralytics/yolov5"><img src="https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker" alt="Docker Pulls"></a>
- <br>
- <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
- <a href="https://www.kaggle.com/ultralytics/yolov5"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
- <a href="https://join.slack.com/t/ultralytics/shared_invite/zt-w29ei8bp-jczz7QYUmDtgo6r6KcMIAg"><img src="https://img.shields.io/badge/Slack-Join_Forum-blue.svg?logo=slack" alt="Join Forum"></a>
- </div>
-
- <br>
- <p>
- YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset, and represents <a href="https://ultralytics.com">Ultralytics</a>
- open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.
- </p>
-
- <div align="center">
- <a href="https://github.com/ultralytics">
- <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-github.png" width="2%"/>
- </a>
- <img width="2%" />
- <a href="https://www.linkedin.com/company/ultralytics">
- <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-linkedin.png" width="2%"/>
- </a>
- <img width="2%" />
- <a href="https://twitter.com/ultralytics">
- <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-twitter.png" width="2%"/>
- </a>
- <img width="2%" />
- <a href="https://www.producthunt.com/@glenn_jocher">
- <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-producthunt.png" width="2%"/>
- </a>
- <img width="2%" />
- <a href="https://youtube.com/ultralytics">
- <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-youtube.png" width="2%"/>
- </a>
- <img width="2%" />
- <a href="https://www.facebook.com/ultralytics">
- <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-facebook.png" width="2%"/>
- </a>
- <img width="2%" />
- <a href="https://www.instagram.com/ultralytics/">
- <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-instagram.png" width="2%"/>
- </a>
- </div>
-
- <!--
- <a align="center" href="https://ultralytics.com/yolov5" target="_blank">
- <img width="800" src="https://github.com/ultralytics/yolov5/releases/download/v1.0/banner-api.png"></a>
- -->
-
- </div>
-
- ## <div align="center">Documentation</div>
-
- See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment.
-
- ## <div align="center">Quick Start Examples</div>
-
- <details open>
- <summary>Install</summary>
-
- Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a
- [**Python>=3.7.0**](https://www.python.org/) environment, including
- [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/).
-
- ```bash
- git clone https://github.com/ultralytics/yolov5 # clone
- cd yolov5
- pip install -r requirements.txt # install
- ```
-
- </details>
-
- <details open>
- <summary>Inference</summary>
-
- Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36). [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest
- YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).
-
- ```python
- import torch
-
- # Model
- model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5m, yolov5l, yolov5x, custom
-
- # Images
- img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list
-
- # Inference
- results = model(img)
-
- # Results
- results.print() # or .show(), .save(), .crop(), .pandas(), etc.
- ```
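The `results` object above exposes detections in several convenient forms; a minimal sketch (assuming the `.pandas()` and `.xyxy` accessors referenced in the comment above behave as in this YOLOv5 version):

```python
import torch

# Minimal sketch: pulling detections out of the `results` object above.
# Accessor and column names are assumptions based on this YOLOv5 version.
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
results = model('https://ultralytics.com/images/zidane.jpg')

df = results.pandas().xyxy[0]      # detections for image 0 as a DataFrame
print(df[['name', 'confidence']])  # class names and confidence scores
raw = results.xyxy[0]              # raw tensor rows: x1, y1, x2, y2, conf, cls
```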
-
- </details>
-
-
-
- <details>
- <summary>Inference with detect.py</summary>
-
- `detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from
- the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`.
-
- ```bash
- python detect.py --source 0 # webcam
- img.jpg # image
- vid.mp4 # video
- path/ # directory
- path/*.jpg # glob
- 'https://youtu.be/Zgi9g1ksQHc' # YouTube
- 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
- ```
-
- </details>
-
- <details>
- <summary>Training</summary>
-
- The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh)
- results. [Models](https://github.com/ultralytics/yolov5/tree/master/models)
- and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest
- YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are
- 1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://github.com/ultralytics/yolov5/issues/475) times faster). Use the
- largest `--batch-size` possible, or pass `--batch-size -1` for
- YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB.
-
- ```bash
- python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 128
- yolov5s 64
- yolov5m 40
- yolov5l 24
- yolov5x 16
- ```
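Training can also be driven from Python rather than the CLI; a hedged sketch, assuming this checkout exposes `train.run()` (whose keyword arguments mirror the CLI flags above) and that it is run from the repo root:

```python
# Hedged sketch: programmatic training. `train.run()` and its kwarg names
# are assumptions about this checkout; verify against your train.py.
import train  # yolov5/train.py

train.run(data='coco128.yaml',  # small 128-image dataset for a smoke test
          weights='yolov5s.pt',
          imgsz=640,
          epochs=3,
          batch_size=16)
```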
-
- <img width="800" src="https://user-images.githubusercontent.com/26833433/90222759-949d8800-ddc1-11ea-9fa1-1c97eed2b963.png">
-
- </details>
-
- <details open>
- <summary>Tutorials</summary>
-
- * [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)&nbsp; 🚀 RECOMMENDED
- * [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)&nbsp; ☘️ RECOMMENDED
- * [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289)&nbsp; 🌟 NEW
- * [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)&nbsp; 🌟 NEW
- * [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475)
- * [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)&nbsp; ⭐ NEW
- * [TFLite, ONNX, CoreML, TensorRT Export](https://github.com/ultralytics/yolov5/issues/251) 🚀
- * [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303)
- * [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318)
- * [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304)
- * [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607)
- * [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314)&nbsp; ⭐ NEW
- * [TensorRT Deployment](https://github.com/wang-xinyu/tensorrtx)
-
- </details>
-
- ## <div align="center">Environments</div>
-
- Get started in seconds with our verified environments. Click each icon below for details.
-
- <div align="center">
- <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb">
- <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-colab-small.png" width="15%"/>
- </a>
- <a href="https://www.kaggle.com/ultralytics/yolov5">
- <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-kaggle-small.png" width="15%"/>
- </a>
- <a href="https://hub.docker.com/r/ultralytics/yolov5">
- <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-docker-small.png" width="15%"/>
- </a>
- <a href="https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart">
- <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-aws-small.png" width="15%"/>
- </a>
- <a href="https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart">
- <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-gcp-small.png" width="15%"/>
- </a>
- </div>
-
- ## <div align="center">Integrations</div>
-
- <div align="center">
- <a href="https://wandb.ai/site?utm_campaign=repo_yolo_readme">
- <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-wb-long.png" width="49%"/>
- </a>
- <a href="https://roboflow.com/?ref=ultralytics">
- <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-roboflow-long.png" width="49%"/>
- </a>
- </div>
-
- |Weights and Biases|Roboflow ⭐ NEW|
- |:-:|:-:|
- |Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |
-
-
- <!-- ## <div align="center">Compete and Win</div>
-
- We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competition with **$10,000** in cash prizes!
-
- <p align="center">
- <a href="https://github.com/ultralytics/yolov5/discussions/3213">
- <img width="850" src="https://github.com/ultralytics/yolov5/releases/download/v1.0/banner-export-competition.png"></a>
- </p> -->
-
- ## <div align="center">Why YOLOv5</div>
-
- <p align="left"><img width="800" src="https://user-images.githubusercontent.com/26833433/155040763-93c22a27-347c-4e3c-847a-8094621d3f4e.png"></p>
- <details>
- <summary>YOLOv5-P5 640 Figure (click to expand)</summary>
-
- <p align="left"><img width="800" src="https://user-images.githubusercontent.com/26833433/155040757-ce0934a3-06a6-43dc-a979-2edbbd69ea0e.png"></p>
- </details>
- <details>
- <summary>Figure Notes (click to expand)</summary>
-
- * **COCO AP val** denotes mAP@0.5:0.95 metric measured on the 5000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536.
- * **GPU Speed** measures average inference time per image on [COCO val2017](http://cocodataset.org) dataset using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch-size 32.
- * **EfficientDet** data from [google/automl](https://github.com/google/automl) at batch size 8.
- * **Reproduce** by `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
- </details>
-
- ### Pretrained Checkpoints
-
- [assets]: https://github.com/ultralytics/yolov5/releases
-
- [TTA]: https://github.com/ultralytics/yolov5/issues/303
-
- |Model |size<br><sup>(pixels) |mAP<sup>val<br>0.5:0.95 |mAP<sup>val<br>0.5 |Speed<br><sup>CPU b1<br>(ms) |Speed<br><sup>V100 b1<br>(ms) |Speed<br><sup>V100 b32<br>(ms) |params<br><sup>(M) |FLOPs<br><sup>@640 (B)
- |--- |--- |--- |--- |--- |--- |--- |--- |---
- |[YOLOv5n][assets] |640 |28.0 |45.7 |**45** |**6.3**|**0.6**|**1.9**|**4.5**
- |[YOLOv5s][assets] |640 |37.4 |56.8 |98 |6.4 |0.9 |7.2 |16.5
- |[YOLOv5m][assets] |640 |45.4 |64.1 |224 |8.2 |1.7 |21.2 |49.0
- |[YOLOv5l][assets] |640 |49.0 |67.3 |430 |10.1 |2.7 |46.5 |109.1
- |[YOLOv5x][assets] |640 |50.7 |68.9 |766 |12.1 |4.8 |86.7 |205.7
- | | | | | | | | |
- |[YOLOv5n6][assets] |1280 |36.0 |54.4 |153 |8.1 |2.1 |3.2 |4.6
- |[YOLOv5s6][assets] |1280 |44.8 |63.7 |385 |8.2 |3.6 |12.6 |16.8
- |[YOLOv5m6][assets] |1280 |51.3 |69.3 |887 |11.1 |6.8 |35.7 |50.0
- |[YOLOv5l6][assets] |1280 |53.7 |71.3 |1784 |15.8 |10.5 |76.8 |111.4
- |[YOLOv5x6][assets]<br>+ [TTA][TTA]|1280<br>1536 |55.0<br>**55.8** |72.7<br>**72.7** |3136<br>- |26.2<br>- |19.4<br>- |140.7<br>- |209.8<br>-
-
- <details>
- <summary>Table Notes (click to expand)</summary>
-
- * All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml).
- * **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.<br>Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
- * **Speed** averaged over COCO val images using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included.<br>Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1`
- * **TTA** [Test Time Augmentation](https://github.com/ultralytics/yolov5/issues/303) includes reflection and scale augmentations.<br>Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
-
- </details>
-
- ## <div align="center">Contribute</div>
-
- We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors!
-
- <a href="https://github.com/ultralytics/yolov5/graphs/contributors"><img src="https://opencollective.com/ultralytics/contributors.svg?width=990" /></a>
-
- ## <div align="center">Contact</div>
-
- For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business inquiries or
- professional support requests please visit [https://ultralytics.com/contact](https://ultralytics.com/contact).
-
- <br>
-
- <div align="center">
- <a href="https://github.com/ultralytics">
- <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-github.png" width="3%"/>
- </a>
- <img width="3%" />
- <a href="https://www.linkedin.com/company/ultralytics">
- <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-linkedin.png" width="3%"/>
- </a>
- <img width="3%" />
- <a href="https://twitter.com/ultralytics">
- <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-twitter.png" width="3%"/>
- </a>
- <img width="3%" />
- <a href="https://www.producthunt.com/@glenn_jocher">
- <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-producthunt.png" width="3%"/>
- </a>
- <img width="3%" />
- <a href="https://youtube.com/ultralytics">
- <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-youtube.png" width="3%"/>
- </a>
- <img width="3%" />
- <a href="https://www.facebook.com/ultralytics">
- <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-facebook.png" width="3%"/>
- </a>
- <img width="3%" />
- <a href="https://www.instagram.com/ultralytics/">
- <img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-social-instagram.png" width="3%"/>
- </a>
- </div>
 
ultralytics/yolov5/data/Argoverse.yaml DELETED
@@ -1,67 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI
- # Example usage: python train.py --data Argoverse.yaml
- # parent
- # ├── yolov5
- # └── datasets
- #     └── Argoverse ← downloads here
-
-
- # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
- path: ../datasets/Argoverse # dataset root dir
- train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images
- val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images
- test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview
-
- # Classes
- nc: 8 # number of classes
- names: ['person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign'] # class names
-
-
- # Download script/URL (optional) ---------------------------------------------------------------------------------------
- download: |
-   import json
-
-   from tqdm import tqdm
-   from utils.general import download, Path
-
-
-   def argoverse2yolo(set):
-       labels = {}
-       a = json.load(open(set, "rb"))
-       for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."):
-           img_id = annot['image_id']
-           img_name = a['images'][img_id]['name']
-           img_label_name = img_name[:-3] + "txt"
-
-           cls = annot['category_id'] # instance class id
-           x_center, y_center, width, height = annot['bbox']
-           x_center = (x_center + width / 2) / 1920.0 # offset and scale
-           y_center = (y_center + height / 2) / 1200.0 # offset and scale
-           width /= 1920.0 # scale
-           height /= 1200.0 # scale
-
-           img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']]
-           if not img_dir.exists():
-               img_dir.mkdir(parents=True, exist_ok=True)
-
-           k = str(img_dir / img_label_name)
-           if k not in labels:
-               labels[k] = []
-           labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n")
-
-       for k in labels:
-           with open(k, "w") as f:
-               f.writelines(labels[k])
-
-
-   # Download
-   dir = Path('../datasets/Argoverse') # dataset root dir
-   urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip']
-   download(urls, dir=dir, delete=False)
-
-   # Convert
-   annotations_dir = 'Argoverse-HD/annotations/'
-   (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images') # rename 'tracking' to 'images'
-   for d in "train.json", "val.json":
-       argoverse2yolo(dir / annotations_dir / d) # convert Argoverse annotations to YOLO labels
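The arithmetic in `argoverse2yolo` is the standard conversion from a top-left `(x, y, w, h)` pixel box to YOLO's normalized center format; the same math as a standalone sketch (1920×1200 is the Argoverse-HD frame size, the sample box is hypothetical):

```python
# Sketch of the box conversion used in argoverse2yolo above:
# top-left (x, y, w, h) in pixels -> normalized (x_center, y_center, w, h).
def to_yolo(x, y, w, h, img_w=1920.0, img_h=1200.0):
    return (x + w / 2) / img_w, (y + h / 2) / img_h, w / img_w, h / img_h

print(to_yolo(860, 575, 100, 50))  # hypothetical box -> (0.474, 0.5, 0.052, 0.042)
```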
 
ultralytics/yolov5/data/GlobalWheat2020.yaml DELETED
@@ -1,54 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- # Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan
- # Example usage: python train.py --data GlobalWheat2020.yaml
- # parent
- # ├── yolov5
- # └── datasets
- #     └── GlobalWheat2020 ← downloads here
-
-
- # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
- path: ../datasets/GlobalWheat2020 # dataset root dir
- train: # train images (relative to 'path') 3422 images
-   - images/arvalis_1
-   - images/arvalis_2
-   - images/arvalis_3
-   - images/ethz_1
-   - images/rres_1
-   - images/inrae_1
-   - images/usask_1
- val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1)
-   - images/ethz_1
- test: # test images (optional) 1276 images
-   - images/utokyo_1
-   - images/utokyo_2
-   - images/nau_1
-   - images/uq_1
-
- # Classes
- nc: 1 # number of classes
- names: ['wheat_head'] # class names
-
-
- # Download script/URL (optional) ---------------------------------------------------------------------------------------
- download: |
-   from utils.general import download, Path
-
-
-   # Download
-   dir = Path(yaml['path']) # dataset root dir
-   urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip',
-           'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip']
-   download(urls, dir=dir)
-
-   # Make Directories
-   for p in 'annotations', 'images', 'labels':
-       (dir / p).mkdir(parents=True, exist_ok=True)
-
-   # Move
-   for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \
-            'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1':
-       (dir / p).rename(dir / 'images' / p) # move to /images
-       f = (dir / p).with_suffix('.json') # json file
-       if f.exists():
-           f.rename((dir / 'annotations' / p).with_suffix('.json')) # move to /annotations
 
ultralytics/yolov5/data/Objects365.yaml DELETED
@@ -1,113 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- # Objects365 dataset https://www.objects365.org/ by Megvii
- # Example usage: python train.py --data Objects365.yaml
- # parent
- # ├── yolov5
- # └── datasets
- #     └── Objects365 ← downloads here
-
-
- # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
- path: ../datasets/Objects365 # dataset root dir
- train: images/train # train images (relative to 'path') 1742289 images
- val: images/val # val images (relative to 'path') 80000 images
- test: # test images (optional)
-
- # Classes
- nc: 365 # number of classes
- names: ['Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Glasses', 'Bottle', 'Desk', 'Cup',
-         'Street Lights', 'Cabinet/shelf', 'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet', 'Book',
-         'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower', 'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag',
-         'Pillow', 'Boots', 'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass', 'Belt', 'Monitor/TV',
-         'Backpack', 'Umbrella', 'Traffic Light', 'Speaker', 'Watch', 'Tie', 'Trash bin Can', 'Slippers', 'Bicycle',
-         'Stool', 'Barrel/bucket', 'Van', 'Couch', 'Sandals', 'Basket', 'Drum', 'Pen/Pencil', 'Bus', 'Wild Bird',
-         'High Heels', 'Motorcycle', 'Guitar', 'Carpet', 'Cell Phone', 'Bread', 'Camera', 'Canned', 'Truck',
-         'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel', 'Stuffed Toy', 'Candle', 'Sailboat', 'Laptop', 'Awning',
-         'Bed', 'Faucet', 'Tent', 'Horse', 'Mirror', 'Power outlet', 'Sink', 'Apple', 'Air Conditioner', 'Knife',
-         'Hockey Stick', 'Paddle', 'Pickup Truck', 'Fork', 'Traffic Sign', 'Balloon', 'Tripod', 'Dog', 'Spoon', 'Clock',
-         'Pot', 'Cow', 'Cake', 'Dinning Table', 'Sheep', 'Hanger', 'Blackboard/Whiteboard', 'Napkin', 'Other Fish',
-         'Orange/Tangerine', 'Toiletry', 'Keyboard', 'Tomato', 'Lantern', 'Machinery Vehicle', 'Fan',
-         'Green Vegetables', 'Banana', 'Baseball Glove', 'Airplane', 'Mouse', 'Train', 'Pumpkin', 'Soccer', 'Skiboard',
-         'Luggage', 'Nightstand', 'Tea pot', 'Telephone', 'Trolley', 'Head Phone', 'Sports Car', 'Stop Sign',
-         'Dessert', 'Scooter', 'Stroller', 'Crane', 'Remote', 'Refrigerator', 'Oven', 'Lemon', 'Duck', 'Baseball Bat',
-         'Surveillance Camera', 'Cat', 'Jug', 'Broccoli', 'Piano', 'Pizza', 'Elephant', 'Skateboard', 'Surfboard',
-         'Gun', 'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot', 'Toilet', 'Kite', 'Strawberry',
-         'Other Balls', 'Shovel', 'Pepper', 'Computer Box', 'Toilet Paper', 'Cleaning Products', 'Chopsticks',
-         'Microwave', 'Pigeon', 'Baseball', 'Cutting/chopping Board', 'Coffee Table', 'Side Table', 'Scissors',
-         'Marker', 'Pie', 'Ladder', 'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant', 'Basketball', 'Zebra', 'Grape',
-         'Giraffe', 'Potato', 'Sausage', 'Tricycle', 'Violin', 'Egg', 'Fire Extinguisher', 'Candy', 'Fire Truck',
-         'Billiards', 'Converter', 'Bathtub', 'Wheelchair', 'Golf Club', 'Briefcase', 'Cucumber', 'Cigar/Cigarette',
-         'Paint Brush', 'Pear', 'Heavy Truck', 'Hamburger', 'Extractor', 'Extension Cord', 'Tong', 'Tennis Racket',
-         'Folder', 'American Football', 'earphone', 'Mask', 'Kettle', 'Tennis', 'Ship', 'Swing', 'Coffee Machine',
-         'Slide', 'Carriage', 'Onion', 'Green beans', 'Projector', 'Frisbee', 'Washing Machine/Drying Machine',
-         'Chicken', 'Printer', 'Watermelon', 'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hot-air balloon',
-         'Cello', 'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog', 'Blender', 'Peach', 'Rice', 'Wallet/Purse',
-         'Volleyball', 'Deer', 'Goose', 'Tape', 'Tablet', 'Cosmetics', 'Trumpet', 'Pineapple', 'Golf Ball',
-         'Ambulance', 'Parking meter', 'Mango', 'Key', 'Hurdle', 'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin',
-         'Megaphone', 'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion', 'Sandwich', 'Nuts',
-         'Speed Limit Sign', 'Induction Cooker', 'Broom', 'Trombone', 'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit',
-         'Router/modem', 'Poker Card', 'Toaster', 'Shrimp', 'Sushi', 'Cheese', 'Notepaper', 'Cherry', 'Pliers', 'CD',
-         'Pasta', 'Hammer', 'Cue', 'Avocado', 'Hamimelon', 'Flask', 'Mushroom', 'Screwdriver', 'Soap', 'Recorder',
-         'Bear', 'Eggplant', 'Board Eraser', 'Coconut', 'Tape Measure/Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips',
-         'Steak', 'Crosswalk Sign', 'Stapler', 'Camel', 'Formula 1', 'Pomegranate', 'Dishwasher', 'Crab',
-         'Hoverboard', 'Meat ball', 'Rice Cooker', 'Tuba', 'Calculator', 'Papaya', 'Antelope', 'Parrot', 'Seal',
-         'Butterfly', 'Dumbbell', 'Donkey', 'Lion', 'Urinal', 'Dolphin', 'Electric Drill', 'Hair Dryer', 'Egg tart',
-         'Jellyfish', 'Treadmill', 'Lighter', 'Grapefruit', 'Game board', 'Mop', 'Radish', 'Baozi', 'Target', 'French',
-         'Spring Rolls', 'Monkey', 'Rabbit', 'Pencil Case', 'Yak', 'Red Cabbage', 'Binoculars', 'Asparagus', 'Barbell',
-         'Scallop', 'Noddles', 'Comb', 'Dumpling', 'Oyster', 'Table Tennis paddle', 'Cosmetics Brush/Eyeliner Pencil',
-         'Chainsaw', 'Eraser', 'Lobster', 'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling', 'Table Tennis']
-
-
- # Download script/URL (optional) ---------------------------------------------------------------------------------------
- download: |
-   from pycocotools.coco import COCO
-   from tqdm import tqdm
-
-   from utils.general import Path, download, np, xyxy2xywhn
-
-
-   # Make Directories
-   dir = Path(yaml['path']) # dataset root dir
-   for p in 'images', 'labels':
-       (dir / p).mkdir(parents=True, exist_ok=True)
-       for q in 'train', 'val':
-           (dir / p / q).mkdir(parents=True, exist_ok=True)
-
-   # Train, Val Splits
-   for split, patches in [('train', 50 + 1), ('val', 43 + 1)]:
-       print(f"Processing {split} in {patches} patches ...")
-       images, labels = dir / 'images' / split, dir / 'labels' / split
-
-       # Download
-       url = f"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/"
-       if split == 'train':
-           download([f'{url}zhiyuan_objv2_{split}.tar.gz'], dir=dir, delete=False) # annotations json
-           download([f'{url}patch{i}.tar.gz' for i in range(patches)], dir=images, curl=True, delete=False, threads=8)
-       elif split == 'val':
-           download([f'{url}zhiyuan_objv2_{split}.json'], dir=dir, delete=False) # annotations json
-           download([f'{url}images/v1/patch{i}.tar.gz' for i in range(15 + 1)], dir=images, curl=True, delete=False, threads=8)
-           download([f'{url}images/v2/patch{i}.tar.gz' for i in range(16, patches)], dir=images, curl=True, delete=False, threads=8)
-
-       # Move
-       for f in tqdm(images.rglob('*.jpg'), desc=f'Moving {split} images'):
-           f.rename(images / f.name) # move to /images/{split}
-
-       # Labels
-       coco = COCO(dir / f'zhiyuan_objv2_{split}.json')
-       names = [x["name"] for x in coco.loadCats(coco.getCatIds())]
-       for cid, cat in enumerate(names):
-           catIds = coco.getCatIds(catNms=[cat])
-           imgIds = coco.getImgIds(catIds=catIds)
-           for im in tqdm(coco.loadImgs(imgIds), desc=f'Class {cid + 1}/{len(names)} {cat}'):
-               width, height = im["width"], im["height"]
-               path = Path(im["file_name"]) # image filename
-               try:
-                   with open(labels / path.with_suffix('.txt').name, 'a') as file:
-                       annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None)
-                       for a in coco.loadAnns(annIds):
-                           x, y, w, h = a['bbox'] # bounding box in xywh (xy top-left corner)
-                           xyxy = np.array([x, y, x + w, y + h])[None] # pixels(1,4)
-                           x, y, w, h = xyxy2xywhn(xyxy, w=width, h=height, clip=True)[0] # normalized and clipped
-                           file.write(f"{cid} {x:.5f} {y:.5f} {w:.5f} {h:.5f}\n")
-               except Exception as e:
-                   print(e)
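`xyxy2xywhn` above is imported from `utils.general`; a rough NumPy stand-in (an illustration, not the repo's exact implementation) showing what the normalize-and-clip step does:

```python
import numpy as np

# Rough stand-in for utils.general.xyxy2xywhn: corner boxes (x1, y1, x2, y2)
# in pixels -> normalized center boxes (x, y, w, h), optionally clipped.
def xyxy2xywhn(xyxy, w, h, clip=False):
    xyxy = np.asarray(xyxy, dtype=float)
    if clip:
        xyxy[:, [0, 2]] = xyxy[:, [0, 2]].clip(0, w)  # clip x coords to image
        xyxy[:, [1, 3]] = xyxy[:, [1, 3]].clip(0, h)  # clip y coords to image
    out = np.empty_like(xyxy)
    out[:, 0] = (xyxy[:, 0] + xyxy[:, 2]) / 2 / w  # x center
    out[:, 1] = (xyxy[:, 1] + xyxy[:, 3]) / 2 / h  # y center
    out[:, 2] = (xyxy[:, 2] - xyxy[:, 0]) / w      # width
    out[:, 3] = (xyxy[:, 3] - xyxy[:, 1]) / h      # height
    return out

print(xyxy2xywhn([[10, 20, 110, 70]], w=640, h=480, clip=True))
# -> [[0.09375 0.09375 0.15625 0.10417]]
```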
 
ultralytics/yolov5/data/SKU-110K.yaml DELETED
@@ -1,53 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail
- # Example usage: python train.py --data SKU-110K.yaml
- # parent
- # ├── yolov5
- # └── datasets
- #     └── SKU-110K ← downloads here
-
-
- # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
- path: ../datasets/SKU-110K # dataset root dir
- train: train.txt # train images (relative to 'path') 8219 images
- val: val.txt # val images (relative to 'path') 588 images
- test: test.txt # test images (optional) 2936 images
-
- # Classes
- nc: 1 # number of classes
- names: ['object'] # class names
-
-
- # Download script/URL (optional) ---------------------------------------------------------------------------------------
- download: |
-   import shutil
-   from tqdm import tqdm
-   from utils.general import np, pd, Path, download, xyxy2xywh
-
-
-   # Download
-   dir = Path(yaml['path']) # dataset root dir
-   parent = Path(dir.parent) # download dir
-   urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz']
-   download(urls, dir=parent, delete=False)
-
-   # Rename directories
-   if dir.exists():
-       shutil.rmtree(dir)
-   (parent / 'SKU110K_fixed').rename(dir) # rename dir
-   (dir / 'labels').mkdir(parents=True, exist_ok=True) # create labels dir
-
-   # Convert labels
-   names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height' # column names
-   for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv':
-       x = pd.read_csv(dir / 'annotations' / d, names=names).values # annotations
-       images, unique_images = x[:, 0], np.unique(x[:, 0])
-       with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f:
-           f.writelines(f'./images/{s}\n' for s in unique_images)
-       for im in tqdm(unique_images, desc=f'Converting {dir / d}'):
-           cls = 0 # single-class dataset
-           with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f:
-               for r in x[images == im]:
-                   w, h = r[6], r[7] # image width, height
-                   xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0] # instance
-                   f.write(f"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label
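All of these `download` scripts converge on the same label format: one `.txt` per image, with one `class x_center y_center width height` line per box, normalized to [0, 1]. A sketch of reading such a file back into pixel corner boxes (the file name and image size are hypothetical):

```python
# Sketch: parse a YOLO label file back into pixel (cls, x1, y1, x2, y2) boxes.
# 'example.txt' and the 1024x768 image size are made up for illustration.
def read_yolo_labels(path, img_w, img_h):
    boxes = []
    with open(path) as f:
        for line in f:
            cls, x, y, w, h = line.split()
            x, y = float(x) * img_w, float(y) * img_h  # center, in pixels
            w, h = float(w) * img_w, float(h) * img_h  # size, in pixels
            boxes.append((int(cls), x - w / 2, y - h / 2, x + w / 2, y + h / 2))
    return boxes

print(read_yolo_labels('example.txt', img_w=1024, img_h=768))
```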
 
ultralytics/yolov5/data/VOC.yaml DELETED
@@ -1,80 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford
- # Example usage: python train.py --data VOC.yaml
- # parent
- # ├── yolov5
- # └── datasets
- #     └── VOC ← downloads here
-
-
- # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
- path: ../datasets/VOC
- train: # train images (relative to 'path') 16551 images
-   - images/train2012
-   - images/train2007
-   - images/val2012
-   - images/val2007
- val: # val images (relative to 'path') 4952 images
-   - images/test2007
- test: # test images (optional)
-   - images/test2007
-
- # Classes
- nc: 20 # number of classes
- names: ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
-         'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] # class names
-
-
- # Download script/URL (optional) ---------------------------------------------------------------------------------------
- download: |
-   import xml.etree.ElementTree as ET
-
-   from tqdm import tqdm
-   from utils.general import download, Path
-
-
-   def convert_label(path, lb_path, year, image_id):
-       def convert_box(size, box):
-           dw, dh = 1. / size[0], 1. / size[1]
-           x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2]
-           return x * dw, y * dh, w * dw, h * dh
-
-       in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml')
-       out_file = open(lb_path, 'w')
-       tree = ET.parse(in_file)
-       root = tree.getroot()
-       size = root.find('size')
-       w = int(size.find('width').text)
-       h = int(size.find('height').text)
-
-       for obj in root.iter('object'):
-           cls = obj.find('name').text
-           if cls in yaml['names'] and not int(obj.find('difficult').text) == 1:
-               xmlbox = obj.find('bndbox')
-               bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')])
-               cls_id = yaml['names'].index(cls) # class id
-               out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n')
-
-
-   # Download
-   dir = Path(yaml['path']) # dataset root dir
-   url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
-   urls = [url + 'VOCtrainval_06-Nov-2007.zip', # 446MB, 5012 images
-           url + 'VOCtest_06-Nov-2007.zip', # 438MB, 4953 images
-           url + 'VOCtrainval_11-May-2012.zip'] # 1.95GB, 17126 images
-   download(urls, dir=dir / 'images', delete=False, threads=3)
-
-   # Convert
-   path = dir / f'images/VOCdevkit'
-   for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'):
-       imgs_path = dir / 'images' / f'{image_set}{year}'
-       lbs_path = dir / 'labels' / f'{image_set}{year}'
-       imgs_path.mkdir(exist_ok=True, parents=True)
-       lbs_path.mkdir(exist_ok=True, parents=True)
-
-       image_ids = open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt').read().strip().split()
-       for id in tqdm(image_ids, desc=f'{image_set}{year}'):
-           f = path / f'VOC{year}/JPEGImages/{id}.jpg' # old img path
-           lb_path = (lbs_path / f.name).with_suffix('.txt') # new label path
-           f.rename(imgs_path / f.name) # move image
-           convert_label(path, lb_path, year, id) # convert labels to YOLO format
 
ultralytics/yolov5/data/VisDrone.yaml DELETED
@@ -1,61 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University
- # Example usage: python train.py --data VisDrone.yaml
- # parent
- # ├── yolov5
- # └── datasets
- #     └── VisDrone ← downloads here
-
-
- # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
- path: ../datasets/VisDrone # dataset root dir
- train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images
- val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images
- test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images
-
- # Classes
- nc: 10 # number of classes
- names: ['pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor']
-
-
- # Download script/URL (optional) ---------------------------------------------------------------------------------------
- download: |
-   from utils.general import download, os, Path
-
-   def visdrone2yolo(dir):
-       from PIL import Image
-       from tqdm import tqdm
-
-       def convert_box(size, box):
-           # Convert VisDrone box to YOLO xywh box
-           dw = 1. / size[0]
-           dh = 1. / size[1]
-           return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh
-
-       (dir / 'labels').mkdir(parents=True, exist_ok=True) # make labels directory
-       pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}')
-       for f in pbar:
-           img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size
-           lines = []
-           with open(f, 'r') as file: # read annotation.txt
-               for row in [x.split(',') for x in file.read().strip().splitlines()]:
-                   if row[4] == '0': # VisDrone 'ignored regions' class 0
-                       continue
-                   cls = int(row[5]) - 1
-                   box = convert_box(img_size, tuple(map(int, row[:4])))
-                   lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n")
-               with open(str(f).replace(os.sep + 'annotations' + os.sep, os.sep + 'labels' + os.sep), 'w') as fl:
-                   fl.writelines(lines) # write label.txt
-
-
-   # Download
-   dir = Path(yaml['path']) # dataset root dir
-   urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip',
-           'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip',
-           'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip',
-           'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip']
-   download(urls, dir=dir, threads=4)
-
-   # Convert
-   for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev':
-       visdrone2yolo(dir / d) # convert VisDrone annotations to YOLO labels
 
ultralytics/yolov5/data/coco.yaml DELETED
@@ -1,45 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- # COCO 2017 dataset http://cocodataset.org by Microsoft
- # Example usage: python train.py --data coco.yaml
- # parent
- # ├── yolov5
- # └── datasets
- #     └── coco ← downloads here
-
-
- # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
- path: ../datasets/coco # dataset root dir
- train: train2017.txt # train images (relative to 'path') 118287 images
- val: val2017.txt # val images (relative to 'path') 5000 images
- test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
-
- # Classes
- nc: 80 # number of classes
- names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
-         'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-         'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
-         'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-         'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
-         'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
-         'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
-         'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-         'hair drier', 'toothbrush'] # class names
-
-
- # Download script/URL (optional)
- download: |
-   from utils.general import download, Path
-
-
-   # Download labels
-   segments = False # segment or box labels
-   dir = Path(yaml['path']) # dataset root dir
-   url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
-   urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
-   download(urls, dir=dir.parent)
-
-   # Download data
-   urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
-           'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
-           'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
-   download(urls, dir=dir / 'images', threads=3)
 
ultralytics/yolov5/data/coco128.yaml DELETED
@@ -1,30 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
- # Example usage: python train.py --data coco128.yaml
- # parent
- # ├── yolov5
- # └── datasets
- #     └── coco128 ← downloads here
-
-
- # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
- path: ../datasets/coco128 # dataset root dir
- train: images/train2017 # train images (relative to 'path') 128 images
- val: images/train2017 # val images (relative to 'path') 128 images
- test: # test images (optional)
-
- # Classes
- nc: 80 # number of classes
- names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
-         'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-         'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
-         'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-         'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
-         'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
-         'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
-         'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-         'hair drier', 'toothbrush'] # class names
-
-
- # Download script/URL (optional)
- download: https://ultralytics.com/assets/coco128.zip
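Before training against any of these dataset YAMLs, it is worth checking that `nc` agrees with `names`; a minimal sketch assuming PyYAML and a local copy of `coco128.yaml`:

```python
import yaml

# Sketch: verify a dataset config is internally consistent before training.
with open('coco128.yaml', errors='ignore') as f:
    data = yaml.safe_load(f)

assert data['nc'] == len(data['names']), \
    f"nc={data['nc']} but {len(data['names'])} names listed"
print(f"{data['nc']} classes, train={data['train']}, val={data['val']}")
```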
 
ultralytics/yolov5/data/hyps/hyp.Objects365.yaml DELETED
@@ -1,34 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- # Hyperparameters for Objects365 training
- # python train.py --weights yolov5m.pt --data Objects365.yaml --evolve
- # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials
-
- lr0: 0.00258
- lrf: 0.17
- momentum: 0.779
- weight_decay: 0.00058
- warmup_epochs: 1.33
- warmup_momentum: 0.86
- warmup_bias_lr: 0.0711
- box: 0.0539
- cls: 0.299
- cls_pw: 0.825
- obj: 0.632
- obj_pw: 1.0
- iou_t: 0.2
- anchor_t: 3.44
- anchors: 3.2
- fl_gamma: 0.0
- hsv_h: 0.0188
- hsv_s: 0.704
- hsv_v: 0.36
- degrees: 0.0
- translate: 0.0902
- scale: 0.491
- shear: 0.0
- perspective: 0.0
- flipud: 0.0
- fliplr: 0.5
- mosaic: 1.0
- mixup: 0.0
- copy_paste: 0.0
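
Hyperparameter files like this one are plain YAML that train.py reads into a dict before building the optimizer and augmentation pipeline. A sketch of the load step (assumes PyYAML and the file saved locally under the name shown):

    import yaml

    with open('hyp.Objects365.yaml', errors='ignore') as f:
        hyp = yaml.safe_load(f)
    print(hyp['lr0'], hyp['anchors'])  # 0.00258 3.2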
 
ultralytics/yolov5/data/hyps/hyp.VOC.yaml DELETED
@@ -1,40 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- # Hyperparameters for VOC training
- # python train.py --batch 128 --weights yolov5m6.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.scratch-med.yaml --evolve
- # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials
-
- # YOLOv5 Hyperparameter Evolution Results
- # Best generation: 467
- # Last generation: 996
- # metrics/precision, metrics/recall, metrics/mAP_0.5, metrics/mAP_0.5:0.95, val/box_loss, val/obj_loss, val/cls_loss
- # 0.87729, 0.85125, 0.91286, 0.72664, 0.0076739, 0.0042529, 0.0013865
-
- lr0: 0.00334
- lrf: 0.15135
- momentum: 0.74832
- weight_decay: 0.00025
- warmup_epochs: 3.3835
- warmup_momentum: 0.59462
- warmup_bias_lr: 0.18657
- box: 0.02
- cls: 0.21638
- cls_pw: 0.5
- obj: 0.51728
- obj_pw: 0.67198
- iou_t: 0.2
- anchor_t: 3.3744
- fl_gamma: 0.0
- hsv_h: 0.01041
- hsv_s: 0.54703
- hsv_v: 0.27739
- degrees: 0.0
- translate: 0.04591
- scale: 0.75544
- shear: 0.0
- perspective: 0.0
- flipud: 0.0
- fliplr: 0.5
- mosaic: 0.85834
- mixup: 0.04266
- copy_paste: 0.0
- anchors: 3.412
 
ultralytics/yolov5/data/hyps/hyp.scratch-high.yaml DELETED
@@ -1,34 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- # Hyperparameters for high-augmentation COCO training from scratch
- # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
- # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
-
- lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
- lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)
- momentum: 0.937 # SGD momentum/Adam beta1
- weight_decay: 0.0005 # optimizer weight decay 5e-4
- warmup_epochs: 3.0 # warmup epochs (fractions ok)
- warmup_momentum: 0.8 # warmup initial momentum
- warmup_bias_lr: 0.1 # warmup initial bias lr
- box: 0.05 # box loss gain
- cls: 0.3 # cls loss gain
- cls_pw: 1.0 # cls BCELoss positive_weight
- obj: 0.7 # obj loss gain (scale with pixels)
- obj_pw: 1.0 # obj BCELoss positive_weight
- iou_t: 0.20 # IoU training threshold
- anchor_t: 4.0 # anchor-multiple threshold
- # anchors: 3 # anchors per output layer (0 to ignore)
- fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
- hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
- hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
- hsv_v: 0.4 # image HSV-Value augmentation (fraction)
- degrees: 0.0 # image rotation (+/- deg)
- translate: 0.1 # image translation (+/- fraction)
- scale: 0.9 # image scale (+/- gain)
- shear: 0.0 # image shear (+/- deg)
- perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
- flipud: 0.0 # image flip up-down (probability)
- fliplr: 0.5 # image flip left-right (probability)
- mosaic: 1.0 # image mosaic (probability)
- mixup: 0.1 # image mixup (probability)
- copy_paste: 0.1 # segment copy-paste (probability)
 
ultralytics/yolov5/data/hyps/hyp.scratch-low.yaml DELETED
@@ -1,34 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- # Hyperparameters for low-augmentation COCO training from scratch
- # python train.py --batch 64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear
- # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
-
- lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
- lrf: 0.01 # final OneCycleLR learning rate (lr0 * lrf)
- momentum: 0.937 # SGD momentum/Adam beta1
- weight_decay: 0.0005 # optimizer weight decay 5e-4
- warmup_epochs: 3.0 # warmup epochs (fractions ok)
- warmup_momentum: 0.8 # warmup initial momentum
- warmup_bias_lr: 0.1 # warmup initial bias lr
- box: 0.05 # box loss gain
- cls: 0.5 # cls loss gain
- cls_pw: 1.0 # cls BCELoss positive_weight
- obj: 1.0 # obj loss gain (scale with pixels)
- obj_pw: 1.0 # obj BCELoss positive_weight
- iou_t: 0.20 # IoU training threshold
- anchor_t: 4.0 # anchor-multiple threshold
- # anchors: 3 # anchors per output layer (0 to ignore)
- fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
- hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
- hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
- hsv_v: 0.4 # image HSV-Value augmentation (fraction)
- degrees: 0.0 # image rotation (+/- deg)
- translate: 0.1 # image translation (+/- fraction)
- scale: 0.5 # image scale (+/- gain)
- shear: 0.0 # image shear (+/- deg)
- perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
- flipud: 0.0 # image flip up-down (probability)
- fliplr: 0.5 # image flip left-right (probability)
- mosaic: 1.0 # image mosaic (probability)
- mixup: 0.0 # image mixup (probability)
- copy_paste: 0.0 # segment copy-paste (probability)
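
In all of these hyp files, lr0 and lrf define the endpoints of the schedule: training starts at lr0 and ends near lr0 * lrf. A sketch of the two scheduler lambdas YOLOv5 uses (linear with --linear, cosine "one cycle" otherwise), assuming 300 epochs as in the header comment:

    import math

    lr0, lrf, epochs = 0.01, 0.01, 300
    linear = lambda x: (1 - x / (epochs - 1)) * (1.0 - lrf) + lrf  # --linear
    one_cycle = lambda x: ((1 - math.cos(x * math.pi / epochs)) / 2) * (lrf - 1) + 1  # default
    print(lr0 * linear(0), lr0 * linear(epochs - 1))  # 0.01 0.0001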
 
ultralytics/yolov5/data/hyps/hyp.scratch-med.yaml DELETED
@@ -1,34 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- # Hyperparameters for medium-augmentation COCO training from scratch
- # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
- # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
-
- lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
- lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)
- momentum: 0.937 # SGD momentum/Adam beta1
- weight_decay: 0.0005 # optimizer weight decay 5e-4
- warmup_epochs: 3.0 # warmup epochs (fractions ok)
- warmup_momentum: 0.8 # warmup initial momentum
- warmup_bias_lr: 0.1 # warmup initial bias lr
- box: 0.05 # box loss gain
- cls: 0.3 # cls loss gain
- cls_pw: 1.0 # cls BCELoss positive_weight
- obj: 0.7 # obj loss gain (scale with pixels)
- obj_pw: 1.0 # obj BCELoss positive_weight
- iou_t: 0.20 # IoU training threshold
- anchor_t: 4.0 # anchor-multiple threshold
- # anchors: 3 # anchors per output layer (0 to ignore)
- fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
- hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
- hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
- hsv_v: 0.4 # image HSV-Value augmentation (fraction)
- degrees: 0.0 # image rotation (+/- deg)
- translate: 0.1 # image translation (+/- fraction)
- scale: 0.9 # image scale (+/- gain)
- shear: 0.0 # image shear (+/- deg)
- perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
- flipud: 0.0 # image flip up-down (probability)
- fliplr: 0.5 # image flip left-right (probability)
- mosaic: 1.0 # image mosaic (probability)
- mixup: 0.1 # image mixup (probability)
- copy_paste: 0.0 # segment copy-paste (probability)
 
ultralytics/yolov5/data/images/bus.jpg DELETED
Binary file (487 kB)
 
ultralytics/yolov5/data/images/zidane.jpg DELETED
Binary file (169 kB)
 
ultralytics/yolov5/data/scripts/download_weights.sh DELETED
@@ -1,20 +0,0 @@
- #!/bin/bash
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- # Download latest models from https://github.com/ultralytics/yolov5/releases
- # Example usage: bash path/to/download_weights.sh
- # parent
- # └── yolov5
- #     ├── yolov5s.pt ← downloads here
- #     ├── yolov5m.pt
- #     └── ...
-
- python - <<EOF
- from utils.downloads import attempt_download
-
- models = ['n', 's', 'm', 'l', 'x']
- models.extend([x + '6' for x in models]) # add P6 models
-
- for x in models:
-     attempt_download(f'yolov5{x}.pt')
-
- EOF
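
attempt_download() fetches a release asset only if the file is missing locally. A quick sketch of inspecting one of the downloaded checkpoints (assumed to be run from a yolov5 checkout so the pickled model classes resolve):

    import torch

    ckpt = torch.load('yolov5s.pt', map_location='cpu')  # dict with 'model', 'epoch', ...
    print(type(ckpt['model']).__name__, len(ckpt['model'].names))  # Model 80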
 
ultralytics/yolov5/data/scripts/get_coco.sh DELETED
@@ -1,27 +0,0 @@
- #!/bin/bash
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- # Download COCO 2017 dataset http://cocodataset.org
- # Example usage: bash data/scripts/get_coco.sh
- # parent
- # ├── yolov5
- # └── datasets
- #     └── coco ← downloads here
-
- # Download/unzip labels
- d='../datasets' # unzip directory
- url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
- f='coco2017labels.zip' # or 'coco2017labels-segments.zip', 68 MB
- echo 'Downloading' $url$f ' ...'
- curl -L $url$f -o $f && unzip -q $f -d $d && rm $f &
-
- # Download/unzip images
- d='../datasets/coco/images' # unzip directory
- url=http://images.cocodataset.org/zips/
- f1='train2017.zip' # 19G, 118k images
- f2='val2017.zip' # 1G, 5k images
- f3='test2017.zip' # 7G, 41k images (optional)
- for f in $f1 $f2; do
-   echo 'Downloading' $url$f '...'
-   curl -L $url$f -o $f && unzip -q $f -d $d && rm $f &
- done
- wait # finish background tasks
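
The trailing `&` plus the final `wait` is what parallelizes the downloads; utils.general.download() does the same with a thread pool. A stdlib-only Python sketch of that pattern (URL list trimmed to one entry for illustration):

    from concurrent.futures import ThreadPoolExecutor
    from pathlib import Path
    from urllib.request import urlretrieve

    urls = ['http://images.cocodataset.org/zips/val2017.zip']  # extend as needed
    dst = Path('../datasets/coco/images')
    dst.mkdir(parents=True, exist_ok=True)
    with ThreadPoolExecutor(max_workers=3) as ex:  # mirrors threads=3 in coco.yaml
        list(ex.map(lambda u: urlretrieve(u, str(dst / Path(u).name)), urls))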
 
ultralytics/yolov5/data/scripts/get_coco128.sh DELETED
@@ -1,17 +0,0 @@
- #!/bin/bash
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- # Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017)
- # Example usage: bash data/scripts/get_coco128.sh
- # parent
- # ├── yolov5
- # └── datasets
- #     └── coco128 ← downloads here
-
- # Download/unzip images and labels
- d='../datasets' # unzip directory
- url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
- f='coco128.zip' # or 'coco128-segments.zip', 68 MB
- echo 'Downloading' $url$f ' ...'
- curl -L $url$f -o $f && unzip -q $f -d $d && rm $f &
-
- wait # finish background tasks
 
ultralytics/yolov5/data/xView.yaml DELETED
@@ -1,102 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- # DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA)
- # -------- DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command! --------
- # Example usage: python train.py --data xView.yaml
- # parent
- # ├── yolov5
- # └── datasets
- #     └── xView ← downloads here
-
-
- # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
- path: ../datasets/xView # dataset root dir
- train: images/autosplit_train.txt # train images (relative to 'path') 90% of 847 train images
- val: images/autosplit_val.txt # val images (relative to 'path') 10% of 847 train images
-
- # Classes
- nc: 60 # number of classes
- names: ['Fixed-wing Aircraft', 'Small Aircraft', 'Cargo Plane', 'Helicopter', 'Passenger Vehicle', 'Small Car', 'Bus',
-         'Pickup Truck', 'Utility Truck', 'Truck', 'Cargo Truck', 'Truck w/Box', 'Truck Tractor', 'Trailer',
-         'Truck w/Flatbed', 'Truck w/Liquid', 'Crane Truck', 'Railway Vehicle', 'Passenger Car', 'Cargo Car',
-         'Flat Car', 'Tank car', 'Locomotive', 'Maritime Vessel', 'Motorboat', 'Sailboat', 'Tugboat', 'Barge',
-         'Fishing Vessel', 'Ferry', 'Yacht', 'Container Ship', 'Oil Tanker', 'Engineering Vehicle', 'Tower crane',
-         'Container Crane', 'Reach Stacker', 'Straddle Carrier', 'Mobile Crane', 'Dump Truck', 'Haul Truck',
-         'Scraper/Tractor', 'Front loader/Bulldozer', 'Excavator', 'Cement Mixer', 'Ground Grader', 'Hut/Tent', 'Shed',
-         'Building', 'Aircraft Hangar', 'Damaged Building', 'Facility', 'Construction Site', 'Vehicle Lot', 'Helipad',
-         'Storage Tank', 'Shipping container lot', 'Shipping Container', 'Pylon', 'Tower'] # class names
-
-
- # Download script/URL (optional) ---------------------------------------------------------------------------------------
- download: |
-   import json
-   import os
-   from pathlib import Path
-
-   import numpy as np
-   from PIL import Image
-   from tqdm import tqdm
-
-   from utils.datasets import autosplit
-   from utils.general import download, xyxy2xywhn
-
-
-   def convert_labels(fname=Path('xView/xView_train.geojson')):
-       # Convert xView geoJSON labels to YOLO format
-       path = fname.parent
-       with open(fname) as f:
-           print(f'Loading {fname}...')
-           data = json.load(f)
-
-       # Make dirs
-       labels = Path(path / 'labels' / 'train')
-       os.system(f'rm -rf {labels}')
-       labels.mkdir(parents=True, exist_ok=True)
-
-       # xView classes 11-94 to 0-59
-       xview_class2index = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, -1, 9, 10, 11,
-                            12, 13, 14, 15, -1, -1, 16, 17, 18, 19, 20, 21, 22, -1, 23, 24, 25, -1, 26, 27, -1, 28, -1,
-                            29, 30, 31, 32, 33, 34, 35, 36, 37, -1, 38, 39, 40, 41, 42, 43, 44, 45, -1, -1, -1, -1, 46,
-                            47, 48, 49, -1, 50, 51, -1, 52, -1, -1, -1, 53, 54, -1, 55, -1, -1, 56, -1, 57, -1, 58, 59]
-
-       shapes = {}
-       for feature in tqdm(data['features'], desc=f'Converting {fname}'):
-           p = feature['properties']
-           if p['bounds_imcoords']:
-               id = p['image_id']
-               file = path / 'train_images' / id
-               if file.exists(): # 1395.tif missing
-                   try:
-                       box = np.array([int(num) for num in p['bounds_imcoords'].split(",")])
-                       assert box.shape[0] == 4, f'incorrect box shape {box.shape[0]}'
-                       cls = p['type_id']
-                       cls = xview_class2index[int(cls)] # xView class 11-94 to 0-59
-                       assert 59 >= cls >= 0, f'incorrect class index {cls}'
-
-                       # Write YOLO label
-                       if id not in shapes:
-                           shapes[id] = Image.open(file).size
-                       box = xyxy2xywhn(box[None].astype(np.float), w=shapes[id][0], h=shapes[id][1], clip=True)
-                       with open((labels / id).with_suffix('.txt'), 'a') as f:
-                           f.write(f"{cls} {' '.join(f'{x:.6f}' for x in box[0])}\n") # write label.txt
-                   except Exception as e:
-                       print(f'WARNING: skipping one label for {file}: {e}')
-
-
-   # Download manually from https://challenge.xviewdataset.org
-   dir = Path(yaml['path']) # dataset root dir
-   # urls = ['https://d307kc0mrhucc3.cloudfront.net/train_labels.zip', # train labels
-   #         'https://d307kc0mrhucc3.cloudfront.net/train_images.zip', # 15G, 847 train images
-   #         'https://d307kc0mrhucc3.cloudfront.net/val_images.zip'] # 5G, 282 val images (no labels)
-   # download(urls, dir=dir, delete=False)
-
-   # Convert labels
-   convert_labels(dir / 'xView_train.geojson')
-
-   # Move images
-   images = Path(dir / 'images')
-   images.mkdir(parents=True, exist_ok=True)
-   Path(dir / 'train_images').rename(dir / 'images' / 'train')
-   Path(dir / 'val_images').rename(dir / 'images' / 'val')
-
-   # Split
-   autosplit(dir / 'images' / 'train')
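
The heart of the conversion above is xyxy2xywhn: pixel-space corner boxes become center/size boxes normalized by image width and height. A standalone sketch of the arithmetic (not the library function itself):

    import numpy as np

    def to_xywhn(box, w, h):  # box = [x1, y1, x2, y2] in pixels
        x1, y1, x2, y2 = box
        return np.array([(x1 + x2) / 2 / w, (y1 + y2) / 2 / h, (x2 - x1) / w, (y2 - y1) / h])

    print(to_xywhn(np.array([100, 200, 300, 400]), w=1000, h=800))  # [0.2 0.375 0.2 0.25]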
 
ultralytics/yolov5/detect.py DELETED
@@ -1,252 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- """
- Run inference on images, videos, directories, streams, etc.
-
- Usage - sources:
-     $ python path/to/detect.py --weights yolov5s.pt --source 0  # webcam
-                                                     img.jpg     # image
-                                                     vid.mp4     # video
-                                                     path/       # directory
-                                                     path/*.jpg  # glob
-                                                     'https://youtu.be/Zgi9g1ksQHc'  # YouTube
-                                                     'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
-
- Usage - formats:
-     $ python path/to/detect.py --weights yolov5s.pt             # PyTorch
-                                          yolov5s.torchscript    # TorchScript
-                                          yolov5s.onnx           # ONNX Runtime or OpenCV DNN with --dnn
-                                          yolov5s.xml            # OpenVINO
-                                          yolov5s.engine         # TensorRT
-                                          yolov5s.mlmodel        # CoreML (MacOS-only)
-                                          yolov5s_saved_model    # TensorFlow SavedModel
-                                          yolov5s.pb             # TensorFlow GraphDef
-                                          yolov5s.tflite         # TensorFlow Lite
-                                          yolov5s_edgetpu.tflite # TensorFlow Edge TPU
- """
-
- import argparse
- import os
- import sys
- from pathlib import Path
-
- import cv2
- import torch
- import torch.backends.cudnn as cudnn
-
- FILE = Path(__file__).resolve()
- ROOT = FILE.parents[0] # YOLOv5 root directory
- if str(ROOT) not in sys.path:
-     sys.path.append(str(ROOT)) # add ROOT to PATH
- ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
-
- from models.common import DetectMultiBackend
- from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams
- from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr,
-                            increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh)
- from utils.plots import Annotator, colors, save_one_box
- from utils.torch_utils import select_device, time_sync
-
-
- @torch.no_grad()
- def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s)
-         source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam
-         data=ROOT / 'data/coco128.yaml', # dataset.yaml path
-         imgsz=(640, 640), # inference size (height, width)
-         conf_thres=0.25, # confidence threshold
-         iou_thres=0.45, # NMS IOU threshold
-         max_det=1000, # maximum detections per image
-         device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
-         view_img=False, # show results
-         save_txt=False, # save results to *.txt
-         save_conf=False, # save confidences in --save-txt labels
-         save_crop=False, # save cropped prediction boxes
-         nosave=False, # do not save images/videos
-         classes=None, # filter by class: --class 0, or --class 0 2 3
-         agnostic_nms=False, # class-agnostic NMS
-         augment=False, # augmented inference
-         visualize=False, # visualize features
-         update=False, # update all models
-         project=ROOT / 'runs/detect', # save results to project/name
-         name='exp', # save results to project/name
-         exist_ok=False, # existing project/name ok, do not increment
-         line_thickness=3, # bounding box thickness (pixels)
-         hide_labels=False, # hide labels
-         hide_conf=False, # hide confidences
-         half=False, # use FP16 half-precision inference
-         dnn=False, # use OpenCV DNN for ONNX inference
-         ):
-     source = str(source)
-     save_img = not nosave and not source.endswith('.txt') # save inference images
-     is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
-     is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
-     webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file)
-     if is_url and is_file:
-         source = check_file(source) # download
-
-     # Directories
-     save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
-     (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
-
-     # Load model
-     device = select_device(device)
-     model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
-     stride, names, pt = model.stride, model.names, model.pt
-     imgsz = check_img_size(imgsz, s=stride) # check image size
-
-     # Dataloader
-     if webcam:
-         view_img = check_imshow()
-         cudnn.benchmark = True # set True to speed up constant image size inference
-         dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt)
-         bs = len(dataset) # batch_size
-     else:
-         dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt)
-         bs = 1 # batch_size
-     vid_path, vid_writer = [None] * bs, [None] * bs
-
-     # Run inference
-     model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup
-     dt, seen = [0.0, 0.0, 0.0], 0
-     for path, im, im0s, vid_cap, s in dataset:
-         t1 = time_sync()
-         im = torch.from_numpy(im).to(device)
-         im = im.half() if model.fp16 else im.float() # uint8 to fp16/32
-         im /= 255 # 0 - 255 to 0.0 - 1.0
-         if len(im.shape) == 3:
-             im = im[None] # expand for batch dim
-         t2 = time_sync()
-         dt[0] += t2 - t1
-
-         # Inference
-         visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
-         pred = model(im, augment=augment, visualize=visualize)
-         t3 = time_sync()
-         dt[1] += t3 - t2
-
-         # NMS
-         pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
-         dt[2] += time_sync() - t3
-
-         # Second-stage classifier (optional)
-         # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
-
-         # Process predictions
-         for i, det in enumerate(pred): # per image
-             seen += 1
-             if webcam: # batch_size >= 1
-                 p, im0, frame = path[i], im0s[i].copy(), dataset.count
-                 s += f'{i}: '
-             else:
-                 p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
-
-             p = Path(p) # to Path
-             save_path = str(save_dir / p.name) # im.jpg
-             txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt
-             s += '%gx%g ' % im.shape[2:] # print string
-             gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
-             imc = im0.copy() if save_crop else im0 # for save_crop
-             annotator = Annotator(im0, line_width=line_thickness, example=str(names))
-             if len(det):
-                 # Rescale boxes from img_size to im0 size
-                 det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
-
-                 # Print results
-                 for c in det[:, -1].unique():
-                     n = (det[:, -1] == c).sum() # detections per class
-                     s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
-
-                 # Write results
-                 for *xyxy, conf, cls in reversed(det):
-                     if save_txt: # Write to file
-                         xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
-                         line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
-                         with open(txt_path + '.txt', 'a') as f:
-                             f.write(('%g ' * len(line)).rstrip() % line + '\n')
-
-                     if save_img or save_crop or view_img: # Add bbox to image
-                         c = int(cls) # integer class
-                         label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
-                         annotator.box_label(xyxy, label, color=colors(c, True))
-                         if save_crop:
-                             save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
-
-             # Stream results
-             im0 = annotator.result()
-             if view_img:
-                 cv2.imshow(str(p), im0)
-                 cv2.waitKey(1) # 1 millisecond
-
-             # Save results (image with detections)
-             if save_img:
-                 if dataset.mode == 'image':
-                     cv2.imwrite(save_path, im0)
-                 else: # 'video' or 'stream'
-                     if vid_path[i] != save_path: # new video
-                         vid_path[i] = save_path
-                         if isinstance(vid_writer[i], cv2.VideoWriter):
-                             vid_writer[i].release() # release previous video writer
-                         if vid_cap: # video
-                             fps = vid_cap.get(cv2.CAP_PROP_FPS)
-                             w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-                             h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-                         else: # stream
-                             fps, w, h = 30, im0.shape[1], im0.shape[0]
-                         save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos
-                         vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
-                     vid_writer[i].write(im0)
-
-         # Print time (inference-only)
-         LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)')
-
-     # Print results
-     t = tuple(x / seen * 1E3 for x in dt) # speeds per image
-     LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
-     if save_txt or save_img:
-         s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
-         LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
-     if update:
-         strip_optimizer(weights) # update model (to fix SourceChangeWarning)
-
-
- def parse_opt():
-     parser = argparse.ArgumentParser()
-     parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)')
-     parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam')
-     parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
-     parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
-     parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
-     parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
-     parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
-     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
-     parser.add_argument('--view-img', action='store_true', help='show results')
-     parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
-     parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
-     parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
-     parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
-     parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
-     parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
-     parser.add_argument('--augment', action='store_true', help='augmented inference')
-     parser.add_argument('--visualize', action='store_true', help='visualize features')
-     parser.add_argument('--update', action='store_true', help='update all models')
-     parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name')
-     parser.add_argument('--name', default='exp', help='save results to project/name')
-     parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
-     parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
-     parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
-     parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
-     parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
-     parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
-     opt = parser.parse_args()
-     opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
-     print_args(FILE.stem, opt)
-     return opt
-
-
- def main(opt):
-     check_requirements(exclude=('tensorboard', 'thop'))
-     run(**vars(opt))
-
-
- if __name__ == "__main__":
-     opt = parse_opt()
-     main(opt)
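
detect.py is the full CLI pipeline (dataloader, NMS, annotation, video writers). For scripted use, the same weights run in a few lines through PyTorch Hub, per hubconf.py below (downloads yolov5s.pt on first call):

    import torch

    model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
    results = model('https://ultralytics.com/images/zidane.jpg')
    results.print()  # e.g. "2 persons, 1 tie"
    print(results.xyxy[0])  # per-image tensor of [x1, y1, x2, y2, conf, cls]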
 
ultralytics/yolov5/export.py DELETED
@@ -1,559 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- """
- Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit
-
- Format                | `export.py --include` | Model
- ---                   | ---                   | ---
- PyTorch               | -                     | yolov5s.pt
- TorchScript           | `torchscript`         | yolov5s.torchscript
- ONNX                  | `onnx`                | yolov5s.onnx
- OpenVINO              | `openvino`            | yolov5s_openvino_model/
- TensorRT              | `engine`              | yolov5s.engine
- CoreML                | `coreml`              | yolov5s.mlmodel
- TensorFlow SavedModel | `saved_model`         | yolov5s_saved_model/
- TensorFlow GraphDef   | `pb`                  | yolov5s.pb
- TensorFlow Lite       | `tflite`              | yolov5s.tflite
- TensorFlow Edge TPU   | `edgetpu`             | yolov5s_edgetpu.tflite
- TensorFlow.js         | `tfjs`                | yolov5s_web_model/
-
- Requirements:
-     $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU
-     $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU
-
- Usage:
-     $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ...
-
- Inference:
-     $ python path/to/detect.py --weights yolov5s.pt             # PyTorch
-                                          yolov5s.torchscript    # TorchScript
-                                          yolov5s.onnx           # ONNX Runtime or OpenCV DNN with --dnn
-                                          yolov5s.xml            # OpenVINO
-                                          yolov5s.engine         # TensorRT
-                                          yolov5s.mlmodel        # CoreML (MacOS-only)
-                                          yolov5s_saved_model    # TensorFlow SavedModel
-                                          yolov5s.pb             # TensorFlow GraphDef
-                                          yolov5s.tflite         # TensorFlow Lite
-                                          yolov5s_edgetpu.tflite # TensorFlow Edge TPU
-
- TensorFlow.js:
-     $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
-     $ npm install
-     $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model
-     $ npm start
- """
-
- import argparse
- import json
- import os
- import platform
- import subprocess
- import sys
- import time
- import warnings
- from pathlib import Path
-
- import pandas as pd
- import torch
- import torch.nn as nn
- from torch.utils.mobile_optimizer import optimize_for_mobile
-
- FILE = Path(__file__).resolve()
- ROOT = FILE.parents[0] # YOLOv5 root directory
- if str(ROOT) not in sys.path:
-     sys.path.append(str(ROOT)) # add ROOT to PATH
- ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
-
- from models.common import Conv
- from models.experimental import attempt_load
- from models.yolo import Detect
- from utils.activations import SiLU
- from utils.datasets import LoadImages
- from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_version, colorstr,
-                            file_size, print_args, url2file)
- from utils.torch_utils import select_device
-
-
- def export_formats():
-     # YOLOv5 export formats
-     x = [['PyTorch', '-', '.pt', True],
-          ['TorchScript', 'torchscript', '.torchscript', True],
-          ['ONNX', 'onnx', '.onnx', True],
-          ['OpenVINO', 'openvino', '_openvino_model', False],
-          ['TensorRT', 'engine', '.engine', True],
-          ['CoreML', 'coreml', '.mlmodel', False],
-          ['TensorFlow SavedModel', 'saved_model', '_saved_model', True],
-          ['TensorFlow GraphDef', 'pb', '.pb', True],
-          ['TensorFlow Lite', 'tflite', '.tflite', False],
-          ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False],
-          ['TensorFlow.js', 'tfjs', '_web_model', False]]
-     return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'GPU'])
-
-
- def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')):
-     # YOLOv5 TorchScript model export
-     try:
-         LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
-         f = file.with_suffix('.torchscript')
-
-         ts = torch.jit.trace(model, im, strict=False)
-         d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names}
-         extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap()
-         if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
-             optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
-         else:
-             ts.save(str(f), _extra_files=extra_files)
-
-         LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-         return f
-     except Exception as e:
-         LOGGER.info(f'{prefix} export failure: {e}')
-
-
- def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')):
-     # YOLOv5 ONNX export
-     try:
-         check_requirements(('onnx',))
-         import onnx
-
-         LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
-         f = file.with_suffix('.onnx')
-
-         torch.onnx.export(model, im, f, verbose=False, opset_version=opset,
-                           training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL,
-                           do_constant_folding=not train,
-                           input_names=['images'],
-                           output_names=['output'],
-                           dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # shape(1,3,640,640)
-                                         'output': {0: 'batch', 1: 'anchors'} # shape(1,25200,85)
-                                         } if dynamic else None)
-
-         # Checks
-         model_onnx = onnx.load(f) # load onnx model
-         onnx.checker.check_model(model_onnx) # check onnx model
-         # LOGGER.info(onnx.helper.printable_graph(model_onnx.graph)) # print
-
-         # Simplify
-         if simplify:
-             try:
-                 check_requirements(('onnx-simplifier',))
-                 import onnxsim
-
-                 LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
-                 model_onnx, check = onnxsim.simplify(
-                     model_onnx,
-                     dynamic_input_shape=dynamic,
-                     input_shapes={'images': list(im.shape)} if dynamic else None)
-                 assert check, 'assert check failed'
-                 onnx.save(model_onnx, f)
-             except Exception as e:
-                 LOGGER.info(f'{prefix} simplifier failure: {e}')
-         LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-         return f
-     except Exception as e:
-         LOGGER.info(f'{prefix} export failure: {e}')
-
-
- def export_openvino(model, im, file, prefix=colorstr('OpenVINO:')):
-     # YOLOv5 OpenVINO export
-     try:
-         check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/
-         import openvino.inference_engine as ie
-
-         LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
-         f = str(file).replace('.pt', '_openvino_model' + os.sep)
-
-         cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f}"
-         subprocess.check_output(cmd, shell=True)
-
-         LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-         return f
-     except Exception as e:
-         LOGGER.info(f'\n{prefix} export failure: {e}')
-
-
- def export_coreml(model, im, file, prefix=colorstr('CoreML:')):
-     # YOLOv5 CoreML export
-     try:
-         check_requirements(('coremltools',))
-         import coremltools as ct
-
-         LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...')
-         f = file.with_suffix('.mlmodel')
-
-         ts = torch.jit.trace(model, im, strict=False) # TorchScript model
-         ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])])
-         ct_model.save(f)
-
-         LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-         return ct_model, f
-     except Exception as e:
-         LOGGER.info(f'\n{prefix} export failure: {e}')
-         return None, None
-
-
- def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')):
-     # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt
-     try:
-         check_requirements(('tensorrt',))
-         import tensorrt as trt
-
-         if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
-             grid = model.model[-1].anchor_grid
-             model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid]
-             export_onnx(model, im, file, 12, train, False, simplify) # opset 12
-             model.model[-1].anchor_grid = grid
-         else: # TensorRT >= 8
-             check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0
-             export_onnx(model, im, file, 13, train, False, simplify) # opset 13
-         onnx = file.with_suffix('.onnx')
-
-         LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
-         assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`'
-         assert onnx.exists(), f'failed to export ONNX file: {onnx}'
-         f = file.with_suffix('.engine') # TensorRT engine file
-         logger = trt.Logger(trt.Logger.INFO)
-         if verbose:
-             logger.min_severity = trt.Logger.Severity.VERBOSE
-
-         builder = trt.Builder(logger)
-         config = builder.create_builder_config()
-         config.max_workspace_size = workspace * 1 << 30
-         # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice
-
-         flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
-         network = builder.create_network(flag)
-         parser = trt.OnnxParser(network, logger)
-         if not parser.parse_from_file(str(onnx)):
-             raise RuntimeError(f'failed to load ONNX file: {onnx}')
-
-         inputs = [network.get_input(i) for i in range(network.num_inputs)]
-         outputs = [network.get_output(i) for i in range(network.num_outputs)]
-         LOGGER.info(f'{prefix} Network Description:')
-         for inp in inputs:
-             LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}')
-         for out in outputs:
-             LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}')
-
-         LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 else 32} engine in {f}')
-         if builder.platform_has_fast_fp16:
-             config.set_flag(trt.BuilderFlag.FP16)
-         with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
-             t.write(engine.serialize())
-         LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-         return f
-     except Exception as e:
-         LOGGER.info(f'\n{prefix} export failure: {e}')
-
-
- def export_saved_model(model, im, file, dynamic,
-                        tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45,
-                        conf_thres=0.25, keras=False, prefix=colorstr('TensorFlow SavedModel:')):
-     # YOLOv5 TensorFlow SavedModel export
-     try:
-         import tensorflow as tf
-         from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
-
-         from models.tf import TFDetect, TFModel
-
-         LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
-         f = str(file).replace('.pt', '_saved_model')
-         batch_size, ch, *imgsz = list(im.shape) # BCHW
-
-         tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
-         im = tf.zeros((batch_size, *imgsz, ch)) # BHWC order for TensorFlow
-         _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
-         inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size)
-         outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
-         keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
-         keras_model.trainable = False
-         keras_model.summary()
-         if keras:
-             keras_model.save(f, save_format='tf')
-         else:
-             m = tf.function(lambda x: keras_model(x)) # full model
-             spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
-             m = m.get_concrete_function(spec)
-             frozen_func = convert_variables_to_constants_v2(m)
-             tfm = tf.Module()
-             tfm.__call__ = tf.function(lambda x: frozen_func(x)[0], [spec])
-             tfm.__call__(im)
-             tf.saved_model.save(
-                 tfm,
-                 f,
-                 options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if
-                 check_version(tf.__version__, '2.6') else tf.saved_model.SaveOptions())
-         LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-         return keras_model, f
-     except Exception as e:
-         LOGGER.info(f'\n{prefix} export failure: {e}')
-         return None, None
-
-
- def export_pb(keras_model, im, file, prefix=colorstr('TensorFlow GraphDef:')):
-     # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow
-     try:
-         import tensorflow as tf
-         from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
-
-         LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
-         f = file.with_suffix('.pb')
-
-         m = tf.function(lambda x: keras_model(x)) # full model
-         m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
-         frozen_func = convert_variables_to_constants_v2(m)
-         frozen_func.graph.as_graph_def()
-         tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)
-
-         LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-         return f
-     except Exception as e:
-         LOGGER.info(f'\n{prefix} export failure: {e}')
-
-
- def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('TensorFlow Lite:')):
-     # YOLOv5 TensorFlow Lite export
-     try:
-         import tensorflow as tf
-
-         LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
-         batch_size, ch, *imgsz = list(im.shape) # BCHW
-         f = str(file).replace('.pt', '-fp16.tflite')
-
-         converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
-         converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
-         converter.target_spec.supported_types = [tf.float16]
-         converter.optimizations = [tf.lite.Optimize.DEFAULT]
-         if int8:
-             from models.tf import representative_dataset_gen
-             dataset = LoadImages(check_dataset(data)['train'], img_size=imgsz, auto=False) # representative data
-             converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib)
-             converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
-             converter.target_spec.supported_types = []
-             converter.inference_input_type = tf.uint8 # or tf.int8
-             converter.inference_output_type = tf.uint8 # or tf.int8
-             converter.experimental_new_quantizer = True
-             f = str(file).replace('.pt', '-int8.tflite')
-
-         tflite_model = converter.convert()
-         open(f, "wb").write(tflite_model)
-         LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-         return f
-     except Exception as e:
-         LOGGER.info(f'\n{prefix} export failure: {e}')
-
-
- def export_edgetpu(keras_model, im, file, prefix=colorstr('Edge TPU:')):
-     # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/
-     try:
-         cmd = 'edgetpu_compiler --version'
-         help_url = 'https://coral.ai/docs/edgetpu/compiler/'
-         assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}'
-         if subprocess.run(cmd + ' >/dev/null', shell=True).returncode != 0:
-             LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}')
-             sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system
-             for c in ['curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -',
-                       'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
-                       'sudo apt-get update',
-                       'sudo apt-get install edgetpu-compiler']:
-                 subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True)
-         ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]
-
-         LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...')
-         f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model
-         f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model
-
-         cmd = f"edgetpu_compiler -s {f_tfl}"
-         subprocess.run(cmd, shell=True, check=True)
-
-         LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-         return f
-     except Exception as e:
-         LOGGER.info(f'\n{prefix} export failure: {e}')
-
-
- def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')):
-     # YOLOv5 TensorFlow.js export
-     try:
-         check_requirements(('tensorflowjs',))
-         import re
-
-         import tensorflowjs as tfjs
-
-         LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
-         f = str(file).replace('.pt', '_web_model') # js dir
-         f_pb = file.with_suffix('.pb') # *.pb path
-         f_json = f + '/model.json' # *.json path
-
-         cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \
-               f'--output_node_names="Identity,Identity_1,Identity_2,Identity_3" {f_pb} {f}'
-         subprocess.run(cmd, shell=True)
-
-         json = open(f_json).read()
-         with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order
-             subst = re.sub(
-                 r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
-                 r'"Identity.?.?": {"name": "Identity.?.?"}, '
-                 r'"Identity.?.?": {"name": "Identity.?.?"}, '
-                 r'"Identity.?.?": {"name": "Identity.?.?"}}}',
-                 r'{"outputs": {"Identity": {"name": "Identity"}, '
-                 r'"Identity_1": {"name": "Identity_1"}, '
-                 r'"Identity_2": {"name": "Identity_2"}, '
-                 r'"Identity_3": {"name": "Identity_3"}}}',
-                 json)
-             j.write(subst)
-
-         LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-         return f
-     except Exception as e:
-         LOGGER.info(f'\n{prefix} export failure: {e}')
-
-
- @torch.no_grad()
- def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path'
-         weights=ROOT / 'yolov5s.pt', # weights path
-         imgsz=(640, 640), # image (height, width)
-         batch_size=1, # batch size
-         device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu
-         include=('torchscript', 'onnx'), # include formats
-         half=False, # FP16 half-precision export
-         inplace=False, # set YOLOv5 Detect() inplace=True
-         train=False, # model.train() mode
-         optimize=False, # TorchScript: optimize for mobile
-         int8=False, # CoreML/TF INT8 quantization
-         dynamic=False, # ONNX/TF: dynamic axes
-         simplify=False, # ONNX: simplify model
-         opset=12, # ONNX: opset version
-         verbose=False, # TensorRT: verbose log
-         workspace=4, # TensorRT: workspace size (GB)
-         nms=False, # TF: add NMS to model
-         agnostic_nms=False, # TF: add agnostic NMS to model
-         topk_per_class=100, # TF.js NMS: topk per class to keep
-         topk_all=100, # TF.js NMS: topk for all classes to keep
-         iou_thres=0.45, # TF.js NMS: IoU threshold
-         conf_thres=0.25 # TF.js NMS: confidence threshold
-         ):
-     t = time.time()
-     include = [x.lower() for x in include] # to lowercase
-     formats = tuple(export_formats()['Argument'][1:]) # --include arguments
-     flags = [x in include for x in formats]
-     assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {formats}'
-     jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = flags # export booleans
-     file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # PyTorch weights
-
-     # Load PyTorch model
-     device = select_device(device)
-     assert not (device.type == 'cpu' and half), '--half only compatible with GPU export, i.e. use --device 0'
-     model = attempt_load(weights, map_location=device, inplace=True, fuse=True) # load FP32 model
-     nc, names = model.nc, model.names # number of classes, class names
-
-     # Checks
-     imgsz *= 2 if len(imgsz) == 1 else 1 # expand
-     opset = 12 if ('openvino' in include) else opset # OpenVINO requires opset <= 12
-     assert nc == len(names), f'Model class count {nc} != len(names) {len(names)}'
-
-     # Input
-     gs = int(max(model.stride)) # grid size (max stride)
-     imgsz = [check_img_size(x, gs) for x in imgsz] # verify img_size are gs-multiples
-     im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection
-
-     # Update model
-     if half:
-         im, model = im.half(), model.half() # to FP16
-     model.train() if train else model.eval() # training mode = no Detect() layer grid construction
-     for k, m in model.named_modules():
-         if isinstance(m, Conv): # assign export-friendly activations
-             if isinstance(m.act, nn.SiLU):
-                 m.act = SiLU()
-         elif isinstance(m, Detect):
-             m.inplace = inplace
-             m.onnx_dynamic = dynamic
-             if hasattr(m, 'forward_export'):
-                 m.forward = m.forward_export # assign custom forward (optional)
-
-     for _ in range(2):
-         y = model(im) # dry runs
-     shape = tuple(y[0].shape) # model output shape
-     LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)")
-
-     # Exports
-     f = [''] * 10 # exported filenames
-     warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning
-     if jit:
-         f[0] = export_torchscript(model, im, file, optimize)
-     if engine: # TensorRT required before ONNX
-         f[1] = export_engine(model, im, file, train, half, simplify, workspace, verbose)
-     if onnx or xml: # OpenVINO requires ONNX
-         f[2] = export_onnx(model, im, file, opset, train, dynamic, simplify)
-     if xml: # OpenVINO
-         f[3] = export_openvino(model, im, file)
-     if coreml:
-         _, f[4] = export_coreml(model, im, file)
-
-     # TensorFlow Exports
-     if any((saved_model, pb, tflite, edgetpu, tfjs)):
-         if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707
-             check_requirements(('flatbuffers==1.12',)) # required before `import tensorflow`
-         assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one type.'
-         model, f[5] = export_saved_model(model.cpu(), im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs,
-                                          agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class,
-                                          topk_all=topk_all, conf_thres=conf_thres, iou_thres=iou_thres) # keras model
-         if pb or tfjs: # pb prerequisite to tfjs
-             f[6] = export_pb(model, im, file)
-         if tflite or edgetpu:
-             f[7] = export_tflite(model, im, file, int8=int8 or edgetpu, data=data, ncalib=100)
-         if edgetpu:
-             f[8] = export_edgetpu(model, im, file)
-         if tfjs:
-             f[9] = export_tfjs(model, im, file)
-
-     # Finish
-     f = [str(x) for x in f if x] # filter out '' and None
-     if any(f):
-         LOGGER.info(f'\nExport complete ({time.time() - t:.2f}s)'
-                     f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
-                     f"\nDetect: python detect.py --weights {f[-1]}"
-                     f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')"
-                     f"\nValidate: python val.py --weights {f[-1]}"
-                     f"\nVisualize: https://netron.app")
-     return f # return list of exported files/dirs
-
-
- def parse_opt():
-     parser = argparse.ArgumentParser()
-     parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
-     parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)')
-     parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)')
-     parser.add_argument('--batch-size', type=int, default=1, help='batch size')
-     parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
-     parser.add_argument('--half', action='store_true', help='FP16 half-precision export')
-     parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True')
-     parser.add_argument('--train', action='store_true', help='model.train() mode')
-     parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile')
-     parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization')
-     parser.add_argument('--dynamic', action='store_true', help='ONNX/TF: dynamic axes')
-     parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model')
-     parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version')
-     parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log')
-     parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)')
-     parser.add_argument('--nms', action='store_true', help='TF: add NMS to model')
-     parser.add_argument('--agnostic-nms', action='store_true', help='TF: add agnostic NMS to model')
-     parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep')
-     parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep')
-     parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold')
-     parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold')
-     parser.add_argument('--include', nargs='+',
-                         default=['torchscript', 'onnx'],
-                         help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs')
-     opt = parser.parse_args()
-     print_args(FILE.stem, opt)
-     return opt
-
-
- def main(opt):
-     for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]):
-         run(**vars(opt))
-
-
- if __name__ == "__main__":
-     opt = parse_opt()
-     main(opt)
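
A quick way to sanity-check an export is to run the artifact once. A sketch for the ONNX path (assumes `python export.py --weights yolov5s.pt --include onnx` was run and onnxruntime is installed; 'images' is the input name set in export_onnx above):

    import numpy as np
    import onnxruntime as ort

    sess = ort.InferenceSession('yolov5s.onnx', providers=['CPUExecutionProvider'])
    im = np.zeros((1, 3, 640, 640), dtype=np.float32)  # matches the 640x640 default
    outs = sess.run(None, {'images': im})
    print(outs[0].shape)  # (1, 25200, 85) for a 640x640 80-class model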
 
ultralytics/yolov5/hubconf.py DELETED
@@ -1,143 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- """
- PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/
-
- Usage:
-     import torch
-     model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
-     model = torch.hub.load('ultralytics/yolov5:master', 'custom', 'path/to/yolov5s.onnx')  # file from branch
- """
-
- import torch
-
-
- def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
-     """Creates or loads a YOLOv5 model
-
-     Arguments:
-         name (str): model name 'yolov5s' or path 'path/to/best.pt'
-         pretrained (bool): load pretrained weights into the model
-         channels (int): number of input channels
-         classes (int): number of model classes
-         autoshape (bool): apply YOLOv5 .autoshape() wrapper to model
-         verbose (bool): print all information to screen
-         device (str, torch.device, None): device to use for model parameters
-
-     Returns:
-         YOLOv5 model
-     """
-     from pathlib import Path
-
-     from models.common import AutoShape, DetectMultiBackend
-     from models.yolo import Model
-     from utils.downloads import attempt_download
-     from utils.general import LOGGER, check_requirements, intersect_dicts, logging
-     from utils.torch_utils import select_device
-
-     if not verbose:
-         LOGGER.setLevel(logging.WARNING)
-     check_requirements(exclude=('tensorboard', 'thop', 'opencv-python'))
-     name = Path(name)
-     path = name.with_suffix('.pt') if name.suffix == '' else name  # checkpoint path
-     try:
-         device = select_device(('0' if torch.cuda.is_available() else 'cpu') if device is None else device)
-
-         if pretrained and channels == 3 and classes == 80:
-             model = DetectMultiBackend(path, device=device)  # download/load FP32 model
-             # model = models.experimental.attempt_load(path, map_location=device)  # download/load FP32 model
-         else:
-             cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0]  # model.yaml path
-             model = Model(cfg, channels, classes)  # create model
-             if pretrained:
-                 ckpt = torch.load(attempt_download(path), map_location=device)  # load
-                 csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
-                 csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors'])  # intersect
-                 model.load_state_dict(csd, strict=False)  # load
-                 if len(ckpt['model'].names) == classes:
-                     model.names = ckpt['model'].names  # set class names attribute
-         if autoshape:
-             model = AutoShape(model)  # for file/URI/PIL/cv2/np inputs and NMS
-         return model.to(device)
-
-     except Exception as e:
-         help_url = 'https://github.com/ultralytics/yolov5/issues/36'
-         s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.'
-         raise Exception(s) from e
-
-
- def custom(path='path/to/model.pt', autoshape=True, verbose=True, device=None):
-     # YOLOv5 custom or local model
-     return _create(path, autoshape=autoshape, verbose=verbose, device=device)
-
-
- def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
-     # YOLOv5-nano model https://github.com/ultralytics/yolov5
-     return _create('yolov5n', pretrained, channels, classes, autoshape, verbose, device)
-
-
- def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
-     # YOLOv5-small model https://github.com/ultralytics/yolov5
-     return _create('yolov5s', pretrained, channels, classes, autoshape, verbose, device)
-
-
- def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
-     # YOLOv5-medium model https://github.com/ultralytics/yolov5
-     return _create('yolov5m', pretrained, channels, classes, autoshape, verbose, device)
-
-
- def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
-     # YOLOv5-large model https://github.com/ultralytics/yolov5
-     return _create('yolov5l', pretrained, channels, classes, autoshape, verbose, device)
-
-
- def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
-     # YOLOv5-xlarge model https://github.com/ultralytics/yolov5
-     return _create('yolov5x', pretrained, channels, classes, autoshape, verbose, device)
-
-
- def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
-     # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5
-     return _create('yolov5n6', pretrained, channels, classes, autoshape, verbose, device)
-
-
- def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
-     # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5
-     return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose, device)
-
-
- def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
-     # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5
-     return _create('yolov5m6', pretrained, channels, classes, autoshape, verbose, device)
-
-
- def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
-     # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5
-     return _create('yolov5l6', pretrained, channels, classes, autoshape, verbose, device)
-
-
- def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
-     # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5
-     return _create('yolov5x6', pretrained, channels, classes, autoshape, verbose, device)
-
-
- if __name__ == '__main__':
-     model = _create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True)  # pretrained
-     # model = custom(path='path/to/model.pt')  # custom
-
-     # Verify inference
-     from pathlib import Path
-
-     import cv2
-     import numpy as np
-     from PIL import Image
-
-     imgs = ['data/images/zidane.jpg',  # filename
-             Path('data/images/zidane.jpg'),  # Path
-             'https://ultralytics.com/images/zidane.jpg',  # URI
-             cv2.imread('data/images/bus.jpg')[:, :, ::-1],  # OpenCV
-             Image.open('data/images/bus.jpg'),  # PIL
-             np.zeros((320, 640, 3))]  # numpy
-
-     results = model(imgs, size=320)  # batched inference
-     results.print()
-     results.save()
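The except branch in _create() above points at force_reload when the Torch Hub cache goes stale. A typical recovery, using the standard torch.hub API:

import torch

# force_reload=True discards the cached ultralytics/yolov5 checkout and re-downloads it
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', force_reload=True)
results = model('https://ultralytics.com/images/zidane.jpg')
results.print()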
ultralytics/yolov5/models/__init__.py DELETED
File without changes
ultralytics/yolov5/models/common.py DELETED
@@ -1,684 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- """
- Common modules
- """
-
- import json
- import math
- import platform
- import warnings
- from collections import OrderedDict, namedtuple
- from copy import copy
- from pathlib import Path
-
- import cv2
- import numpy as np
- import pandas as pd
- import requests
- import torch
- import torch.nn as nn
- import yaml
- from PIL import Image
- from torch.cuda import amp
-
- from utils.datasets import exif_transpose, letterbox
- from utils.general import (LOGGER, check_requirements, check_suffix, check_version, colorstr, increment_path,
-                            make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh)
- from utils.plots import Annotator, colors, save_one_box
- from utils.torch_utils import copy_attr, time_sync
-
-
- def autopad(k, p=None):  # kernel, padding
-     # Pad to 'same'
-     if p is None:
-         p = k // 2 if isinstance(k, int) else (x // 2 for x in k)  # auto-pad
-     return p
-
-
- class Conv(nn.Module):
-     # Standard convolution
-     def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
-         super().__init__()
-         self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
-         self.bn = nn.BatchNorm2d(c2)
-         self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
-
-     def forward(self, x):
-         return self.act(self.bn(self.conv(x)))
-
-     def forward_fuse(self, x):
-         return self.act(self.conv(x))
-
-
- class DWConv(Conv):
-     # Depth-wise convolution class
-     def __init__(self, c1, c2, k=1, s=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
-         super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
-
-
- class TransformerLayer(nn.Module):
-     # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)
-     def __init__(self, c, num_heads):
-         super().__init__()
-         self.q = nn.Linear(c, c, bias=False)
-         self.k = nn.Linear(c, c, bias=False)
-         self.v = nn.Linear(c, c, bias=False)
-         self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
-         self.fc1 = nn.Linear(c, c, bias=False)
-         self.fc2 = nn.Linear(c, c, bias=False)
-
-     def forward(self, x):
-         x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
-         x = self.fc2(self.fc1(x)) + x
-         return x
-
-
- class TransformerBlock(nn.Module):
-     # Vision Transformer https://arxiv.org/abs/2010.11929
-     def __init__(self, c1, c2, num_heads, num_layers):
-         super().__init__()
-         self.conv = None
-         if c1 != c2:
-             self.conv = Conv(c1, c2)
-         self.linear = nn.Linear(c2, c2)  # learnable position embedding
-         self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))
-         self.c2 = c2
-
-     def forward(self, x):
-         if self.conv is not None:
-             x = self.conv(x)
-         b, _, w, h = x.shape
-         p = x.flatten(2).permute(2, 0, 1)
-         return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h)
-
-
- class Bottleneck(nn.Module):
-     # Standard bottleneck
-     def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion
-         super().__init__()
-         c_ = int(c2 * e)  # hidden channels
-         self.cv1 = Conv(c1, c_, 1, 1)
-         self.cv2 = Conv(c_, c2, 3, 1, g=g)
-         self.add = shortcut and c1 == c2
-
-     def forward(self, x):
-         return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
-
-
- class BottleneckCSP(nn.Module):
-     # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
-     def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
-         super().__init__()
-         c_ = int(c2 * e)  # hidden channels
-         self.cv1 = Conv(c1, c_, 1, 1)
-         self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
-         self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
-         self.cv4 = Conv(2 * c_, c2, 1, 1)
-         self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
-         self.act = nn.SiLU()
-         self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
-
-     def forward(self, x):
-         y1 = self.cv3(self.m(self.cv1(x)))
-         y2 = self.cv2(x)
-         return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1))))
-
-
- class C3(nn.Module):
-     # CSP Bottleneck with 3 convolutions
-     def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
-         super().__init__()
-         c_ = int(c2 * e)  # hidden channels
-         self.cv1 = Conv(c1, c_, 1, 1)
-         self.cv2 = Conv(c1, c_, 1, 1)
-         self.cv3 = Conv(2 * c_, c2, 1)  # act=FReLU(c2)
-         self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
-         # self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)))
-
-     def forward(self, x):
-         return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))
-
-
- class C3TR(C3):
-     # C3 module with TransformerBlock()
-     def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
-         super().__init__(c1, c2, n, shortcut, g, e)
-         c_ = int(c2 * e)
-         self.m = TransformerBlock(c_, c_, 4, n)
-
-
- class C3SPP(C3):
-     # C3 module with SPP()
-     def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):
-         super().__init__(c1, c2, n, shortcut, g, e)
-         c_ = int(c2 * e)
-         self.m = SPP(c_, c_, k)
-
-
- class C3Ghost(C3):
-     # C3 module with GhostBottleneck()
-     def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
-         super().__init__(c1, c2, n, shortcut, g, e)
-         c_ = int(c2 * e)  # hidden channels
-         self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))
-
-
- class SPP(nn.Module):
-     # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729
-     def __init__(self, c1, c2, k=(5, 9, 13)):
-         super().__init__()
-         c_ = c1 // 2  # hidden channels
-         self.cv1 = Conv(c1, c_, 1, 1)
-         self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
-         self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
-
-     def forward(self, x):
-         x = self.cv1(x)
-         with warnings.catch_warnings():
-             warnings.simplefilter('ignore')  # suppress torch 1.9.0 max_pool2d() warning
-             return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
-
-
- class SPPF(nn.Module):
-     # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
-     def __init__(self, c1, c2, k=5):  # equivalent to SPP(k=(5, 9, 13))
-         super().__init__()
-         c_ = c1 // 2  # hidden channels
-         self.cv1 = Conv(c1, c_, 1, 1)
-         self.cv2 = Conv(c_ * 4, c2, 1, 1)
-         self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
-
-     def forward(self, x):
-         x = self.cv1(x)
-         with warnings.catch_warnings():
-             warnings.simplefilter('ignore')  # suppress torch 1.9.0 max_pool2d() warning
-             y1 = self.m(x)
-             y2 = self.m(y1)
-             return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
-
-
- class Focus(nn.Module):
-     # Focus wh information into c-space
-     def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
-         super().__init__()
-         self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
-         # self.contract = Contract(gain=2)
-
-     def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
-         return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1))
-         # return self.conv(self.contract(x))
-
-
- class GhostConv(nn.Module):
-     # Ghost Convolution https://github.com/huawei-noah/ghostnet
-     def __init__(self, c1, c2, k=1, s=1, g=1, act=True):  # ch_in, ch_out, kernel, stride, groups
-         super().__init__()
-         c_ = c2 // 2  # hidden channels
-         self.cv1 = Conv(c1, c_, k, s, None, g, act)
-         self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)
-
-     def forward(self, x):
-         y = self.cv1(x)
-         return torch.cat((y, self.cv2(y)), 1)
-
-
- class GhostBottleneck(nn.Module):
-     # Ghost Bottleneck https://github.com/huawei-noah/ghostnet
-     def __init__(self, c1, c2, k=3, s=1):  # ch_in, ch_out, kernel, stride
-         super().__init__()
-         c_ = c2 // 2
-         self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1),  # pw
-                                   DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(),  # dw
-                                   GhostConv(c_, c2, 1, 1, act=False))  # pw-linear
-         self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),
-                                       Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
-
-     def forward(self, x):
-         return self.conv(x) + self.shortcut(x)
-
-
- class Contract(nn.Module):
-     # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
-     def __init__(self, gain=2):
-         super().__init__()
-         self.gain = gain
-
-     def forward(self, x):
-         b, c, h, w = x.size()  # assert (h / s == 0) and (W / s == 0), 'Indivisible gain'
-         s = self.gain
-         x = x.view(b, c, h // s, s, w // s, s)  # x(1,64,40,2,40,2)
-         x = x.permute(0, 3, 5, 1, 2, 4).contiguous()  # x(1,2,2,64,40,40)
-         return x.view(b, c * s * s, h // s, w // s)  # x(1,256,40,40)
-
-
- class Expand(nn.Module):
-     # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
-     def __init__(self, gain=2):
-         super().__init__()
-         self.gain = gain
-
-     def forward(self, x):
-         b, c, h, w = x.size()  # assert C / s ** 2 == 0, 'Indivisible gain'
-         s = self.gain
-         x = x.view(b, s, s, c // s ** 2, h, w)  # x(1,2,2,16,80,80)
-         x = x.permute(0, 3, 4, 1, 5, 2).contiguous()  # x(1,16,80,2,80,2)
-         return x.view(b, c // s ** 2, h * s, w * s)  # x(1,16,160,160)
-
-
- class Concat(nn.Module):
-     # Concatenate a list of tensors along dimension
-     def __init__(self, dimension=1):
-         super().__init__()
-         self.d = dimension
-
-     def forward(self, x):
-         return torch.cat(x, self.d)
-
-
- class DetectMultiBackend(nn.Module):
-     # YOLOv5 MultiBackend class for python inference on various backends
-     def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False):
-         # Usage:
-         #   PyTorch:               weights = *.pt
-         #   TorchScript:                     *.torchscript
-         #   ONNX Runtime:                    *.onnx
-         #   ONNX OpenCV DNN:                 *.onnx with --dnn
-         #   OpenVINO:                        *.xml
-         #   CoreML:                          *.mlmodel
-         #   TensorRT:                        *.engine
-         #   TensorFlow SavedModel:           *_saved_model
-         #   TensorFlow GraphDef:             *.pb
-         #   TensorFlow Lite:                 *.tflite
-         #   TensorFlow Edge TPU:             *_edgetpu.tflite
-         from models.experimental import attempt_download, attempt_load  # scoped to avoid circular import
-
-         super().__init__()
-         w = str(weights[0] if isinstance(weights, list) else weights)
-         pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = self.model_type(w)  # get backend
-         stride, names = 64, [f'class{i}' for i in range(1000)]  # assign defaults
-         w = attempt_download(w)  # download if not local
-         fp16 &= (pt or jit or onnx or engine) and device.type != 'cpu'  # FP16
-         if data:  # data.yaml path (optional)
-             with open(data, errors='ignore') as f:
-                 names = yaml.safe_load(f)['names']  # class names
-
-         if pt:  # PyTorch
-             model = attempt_load(weights if isinstance(weights, list) else w, map_location=device)
-             stride = max(int(model.stride.max()), 32)  # model stride
-             names = model.module.names if hasattr(model, 'module') else model.names  # get class names
-             model.half() if fp16 else model.float()
-             self.model = model  # explicitly assign for to(), cpu(), cuda(), half()
-         elif jit:  # TorchScript
-             LOGGER.info(f'Loading {w} for TorchScript inference...')
-             extra_files = {'config.txt': ''}  # model metadata
-             model = torch.jit.load(w, _extra_files=extra_files)
-             model.half() if fp16 else model.float()
-             if extra_files['config.txt']:
-                 d = json.loads(extra_files['config.txt'])  # extra_files dict
-                 stride, names = int(d['stride']), d['names']
-         elif dnn:  # ONNX OpenCV DNN
-             LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')
-             check_requirements(('opencv-python>=4.5.4',))
-             net = cv2.dnn.readNetFromONNX(w)
-         elif onnx:  # ONNX Runtime
-             LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
-             cuda = torch.cuda.is_available()
-             check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
-             import onnxruntime
-             providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
-             session = onnxruntime.InferenceSession(w, providers=providers)
-         elif xml:  # OpenVINO
-             LOGGER.info(f'Loading {w} for OpenVINO inference...')
-             check_requirements(('openvino-dev',))  # requires openvino-dev: https://pypi.org/project/openvino-dev/
-             import openvino.inference_engine as ie
-             core = ie.IECore()
-             if not Path(w).is_file():  # if not *.xml
-                 w = next(Path(w).glob('*.xml'))  # get *.xml file from *_openvino_model dir
-             network = core.read_network(model=w, weights=Path(w).with_suffix('.bin'))  # *.xml, *.bin paths
-             executable_network = core.load_network(network, device_name='CPU', num_requests=1)
-         elif engine:  # TensorRT
-             LOGGER.info(f'Loading {w} for TensorRT inference...')
-             import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
-             check_version(trt.__version__, '7.0.0', hard=True)  # require tensorrt>=7.0.0
-             Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))
-             logger = trt.Logger(trt.Logger.INFO)
-             with open(w, 'rb') as f, trt.Runtime(logger) as runtime:
-                 model = runtime.deserialize_cuda_engine(f.read())
-             bindings = OrderedDict()
-             fp16 = False  # default updated below
-             for index in range(model.num_bindings):
-                 name = model.get_binding_name(index)
-                 dtype = trt.nptype(model.get_binding_dtype(index))
-                 shape = tuple(model.get_binding_shape(index))
-                 data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device)
-                 bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr()))
-                 if model.binding_is_input(index) and dtype == np.float16:
-                     fp16 = True
-             binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())
-             context = model.create_execution_context()
-             batch_size = bindings['images'].shape[0]
-         elif coreml:  # CoreML
-             LOGGER.info(f'Loading {w} for CoreML inference...')
-             import coremltools as ct
-             model = ct.models.MLModel(w)
-         else:  # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
-             if saved_model:  # SavedModel
-                 LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')
-                 import tensorflow as tf
-                 keras = False  # assume TF1 saved_model
-                 model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)
-             elif pb:  # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
-                 LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')
-                 import tensorflow as tf
-
-                 def wrap_frozen_graph(gd, inputs, outputs):
-                     x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), [])  # wrapped
-                     ge = x.graph.as_graph_element
-                     return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))
-
-                 gd = tf.Graph().as_graph_def()  # graph_def
-                 gd.ParseFromString(open(w, 'rb').read())
-                 frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs="Identity:0")
-             elif tflite or edgetpu:  # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
-                 try:  # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
-                     from tflite_runtime.interpreter import Interpreter, load_delegate
-                 except ImportError:
-                     import tensorflow as tf
-                     Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate,
-                 if edgetpu:  # Edge TPU https://coral.ai/software/#edgetpu-runtime
-                     LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')
-                     delegate = {'Linux': 'libedgetpu.so.1',
-                                 'Darwin': 'libedgetpu.1.dylib',
-                                 'Windows': 'edgetpu.dll'}[platform.system()]
-                     interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])
-                 else:  # Lite
-                     LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
-                     interpreter = Interpreter(model_path=w)  # load TFLite model
-                 interpreter.allocate_tensors()  # allocate
-                 input_details = interpreter.get_input_details()  # inputs
-                 output_details = interpreter.get_output_details()  # outputs
-             elif tfjs:
-                 raise Exception('ERROR: YOLOv5 TF.js inference is not supported')
-         self.__dict__.update(locals())  # assign all variables to self
-
-     def forward(self, im, augment=False, visualize=False, val=False):
-         # YOLOv5 MultiBackend inference
-         b, ch, h, w = im.shape  # batch, channel, height, width
-         if self.pt or self.jit:  # PyTorch
-             y = self.model(im) if self.jit else self.model(im, augment=augment, visualize=visualize)
-             return y if val else y[0]
-         elif self.dnn:  # ONNX OpenCV DNN
-             im = im.cpu().numpy()  # torch to numpy
-             self.net.setInput(im)
-             y = self.net.forward()
-         elif self.onnx:  # ONNX Runtime
-             im = im.cpu().numpy()  # torch to numpy
-             y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0]
-         elif self.xml:  # OpenVINO
-             im = im.cpu().numpy()  # FP32
-             desc = self.ie.TensorDesc(precision='FP32', dims=im.shape, layout='NCHW')  # Tensor Description
-             request = self.executable_network.requests[0]  # inference request
-             request.set_blob(blob_name='images', blob=self.ie.Blob(desc, im))  # name=next(iter(request.input_blobs))
-             request.infer()
-             y = request.output_blobs['output'].buffer  # name=next(iter(request.output_blobs))
-         elif self.engine:  # TensorRT
-             assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape)
-             self.binding_addrs['images'] = int(im.data_ptr())
-             self.context.execute_v2(list(self.binding_addrs.values()))
-             y = self.bindings['output'].data
-         elif self.coreml:  # CoreML
-             im = im.permute(0, 2, 3, 1).cpu().numpy()  # torch BCHW to numpy BHWC shape(1,320,192,3)
-             im = Image.fromarray((im[0] * 255).astype('uint8'))
-             # im = im.resize((192, 320), Image.ANTIALIAS)
-             y = self.model.predict({'image': im})  # coordinates are xywh normalized
-             if 'confidence' in y:
-                 box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]])  # xyxy pixels
-                 conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)
-                 y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)
-             else:
-                 k = 'var_' + str(sorted(int(k.replace('var_', '')) for k in y)[-1])  # output key
-                 y = y[k]  # output
-         else:  # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
-             im = im.permute(0, 2, 3, 1).cpu().numpy()  # torch BCHW to numpy BHWC shape(1,320,192,3)
-             if self.saved_model:  # SavedModel
-                 y = (self.model(im, training=False) if self.keras else self.model(im)).numpy()
-             elif self.pb:  # GraphDef
-                 y = self.frozen_func(x=self.tf.constant(im)).numpy()
-             else:  # Lite or Edge TPU
-                 input, output = self.input_details[0], self.output_details[0]
-                 int8 = input['dtype'] == np.uint8  # is TFLite quantized uint8 model
-                 if int8:
-                     scale, zero_point = input['quantization']
-                     im = (im / scale + zero_point).astype(np.uint8)  # de-scale
-                 self.interpreter.set_tensor(input['index'], im)
-                 self.interpreter.invoke()
-                 y = self.interpreter.get_tensor(output['index'])
-                 if int8:
-                     scale, zero_point = output['quantization']
-                     y = (y.astype(np.float32) - zero_point) * scale  # re-scale
-             y[..., :4] *= [w, h, w, h]  # xywh normalized to pixels
-
-         if isinstance(y, np.ndarray):
-             y = torch.tensor(y, device=self.device)
-         return (y, []) if val else y
-
-     def warmup(self, imgsz=(1, 3, 640, 640)):
-         # Warmup model by running inference once
-         if any((self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb)):  # warmup types
-             if self.device.type != 'cpu':  # only warmup GPU models
-                 im = torch.zeros(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device)  # input
-                 for _ in range(2 if self.jit else 1):  #
-                     self.forward(im)  # warmup
-
-     @staticmethod
-     def model_type(p='path/to/model.pt'):
-         # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx
-         from export import export_formats
-         suffixes = list(export_formats().Suffix) + ['.xml']  # export suffixes
-         check_suffix(p, suffixes)  # checks
-         p = Path(p).name  # eliminate trailing separators
-         pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, xml2 = (s in p for s in suffixes)
-         xml |= xml2  # *_openvino_model or *.xml
-         tflite &= not edgetpu  # *.tflite
-         return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs
-
-
- class AutoShape(nn.Module):
-     # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
-     conf = 0.25  # NMS confidence threshold
-     iou = 0.45  # NMS IoU threshold
-     agnostic = False  # NMS class-agnostic
-     multi_label = False  # NMS multiple labels per box
-     classes = None  # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs
-     max_det = 1000  # maximum number of detections per image
-     amp = False  # Automatic Mixed Precision (AMP) inference
-
-     def __init__(self, model):
-         super().__init__()
-         LOGGER.info('Adding AutoShape... ')
-         copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=())  # copy attributes
-         self.dmb = isinstance(model, DetectMultiBackend)  # DetectMultiBackend() instance
-         self.pt = not self.dmb or model.pt  # PyTorch model
-         self.model = model.eval()
-
-     def _apply(self, fn):
-         # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
-         self = super()._apply(fn)
-         if self.pt:
-             m = self.model.model.model[-1] if self.dmb else self.model.model[-1]  # Detect()
-             m.stride = fn(m.stride)
-             m.grid = list(map(fn, m.grid))
-             if isinstance(m.anchor_grid, list):
-                 m.anchor_grid = list(map(fn, m.anchor_grid))
-         return self
-
-     @torch.no_grad()
-     def forward(self, imgs, size=640, augment=False, profile=False):
-         # Inference from various sources. For height=640, width=1280, RGB images example inputs are:
-         #   file:      imgs = 'data/images/zidane.jpg'  # str or PosixPath
-         #   URI:            = 'https://ultralytics.com/images/zidane.jpg'
-         #   OpenCV:         = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(640,1280,3)
-         #   PIL:            = Image.open('image.jpg') or ImageGrab.grab()  # HWC x(640,1280,3)
-         #   numpy:          = np.zeros((640,1280,3))  # HWC
-         #   torch:          = torch.zeros(16,3,320,640)  # BCHW (scaled to size=640, 0-1 values)
-         #   multiple:       = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...]  # list of images
-
-         t = [time_sync()]
-         p = next(self.model.parameters()) if self.pt else torch.zeros(1)  # for device and type
-         autocast = self.amp and (p.device.type != 'cpu')  # Automatic Mixed Precision (AMP) inference
-         if isinstance(imgs, torch.Tensor):  # torch
-             with amp.autocast(autocast):
-                 return self.model(imgs.to(p.device).type_as(p), augment, profile)  # inference
-
-         # Pre-process
-         n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs])  # number of images, list of images
-         shape0, shape1, files = [], [], []  # image and inference shapes, filenames
-         for i, im in enumerate(imgs):
-             f = f'image{i}'  # filename
-             if isinstance(im, (str, Path)):  # filename or uri
-                 im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im
-                 im = np.asarray(exif_transpose(im))
-             elif isinstance(im, Image.Image):  # PIL Image
-                 im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f
-             files.append(Path(f).with_suffix('.jpg').name)
-             if im.shape[0] < 5:  # image in CHW
-                 im = im.transpose((1, 2, 0))  # reverse dataloader .transpose(2, 0, 1)
-             im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3)  # enforce 3ch input
-             s = im.shape[:2]  # HWC
-             shape0.append(s)  # image shape
-             g = (size / max(s))  # gain
-             shape1.append([y * g for y in s])
-             imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im)  # update
-         shape1 = [make_divisible(x, self.stride) if self.pt else size for x in np.array(shape1).max(0)]  # inf shape
-         x = [letterbox(im, shape1, auto=False)[0] for im in imgs]  # pad
-         x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2)))  # stack and BHWC to BCHW
-         x = torch.from_numpy(x).to(p.device).type_as(p) / 255  # uint8 to fp16/32
-         t.append(time_sync())
-
-         with amp.autocast(autocast):
-             # Inference
-             y = self.model(x, augment, profile)  # forward
-             t.append(time_sync())
-
-             # Post-process
-             y = non_max_suppression(y if self.dmb else y[0], self.conf, self.iou, self.classes, self.agnostic,
-                                     self.multi_label, max_det=self.max_det)  # NMS
-             for i in range(n):
-                 scale_coords(shape1, y[i][:, :4], shape0[i])
-
-             t.append(time_sync())
-             return Detections(imgs, y, files, t, self.names, x.shape)
-
-
- class Detections:
-     # YOLOv5 detections class for inference results
-     def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None):
-         super().__init__()
-         d = pred[0].device  # device
-         gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in imgs]  # normalizations
-         self.imgs = imgs  # list of images as numpy arrays
-         self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
-         self.names = names  # class names
-         self.files = files  # image filenames
-         self.times = times  # profiling times
-         self.xyxy = pred  # xyxy pixels
-         self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
-         self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
-         self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized
-         self.n = len(self.pred)  # number of images (batch size)
-         self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3))  # timestamps (ms)
-         self.s = shape  # inference BCHW shape
-
-     def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')):
-         crops = []
-         for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):
-             s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} '  # string
-             if pred.shape[0]:
-                 for c in pred[:, -1].unique():
-                     n = (pred[:, -1] == c).sum()  # detections per class
-                     s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "  # add to string
-                 if show or save or render or crop:
-                     annotator = Annotator(im, example=str(self.names))
-                     for *box, conf, cls in reversed(pred):  # xyxy, confidence, class
-                         label = f'{self.names[int(cls)]} {conf:.2f}'
-                         if crop:
-                             file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None
-                             crops.append({'box': box, 'conf': conf, 'cls': cls, 'label': label,
-                                           'im': save_one_box(box, im, file=file, save=save)})
-                         else:  # all others
-                             annotator.box_label(box, label, color=colors(cls))
-                     im = annotator.im
-             else:
-                 s += '(no detections)'
-
-             im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im  # from np
-             if pprint:
-                 LOGGER.info(s.rstrip(', '))
-             if show:
-                 im.show(self.files[i])  # show
-             if save:
-                 f = self.files[i]
-                 im.save(save_dir / f)  # save
-                 if i == self.n - 1:
-                     LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}")
-             if render:
-                 self.imgs[i] = np.asarray(im)
-         if crop:
-             if save:
-                 LOGGER.info(f'Saved results to {save_dir}\n')
-             return crops
-
-     def print(self):
-         self.display(pprint=True)  # print results
-         LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' %
-                     self.t)
-
-     def show(self):
-         self.display(show=True)  # show results
-
-     def save(self, save_dir='runs/detect/exp'):
-         save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True)  # increment save_dir
-         self.display(save=True, save_dir=save_dir)  # save results
-
-     def crop(self, save=True, save_dir='runs/detect/exp'):
-         save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None
-         return self.display(crop=True, save=save, save_dir=save_dir)  # crop results
-
-     def render(self):
-         self.display(render=True)  # render results
-         return self.imgs
-
-     def pandas(self):
-         # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
-         new = copy(self)  # return copy
-         ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name'  # xyxy columns
-         cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name'  # xywh columns
-         for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
-             a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)]  # update
-             setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
-         return new
-
-     def tolist(self):
-         # return a list of Detections objects, i.e. 'for result in results.tolist():'
-         r = range(self.n)  # iterable
-         x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r]
-         # for d in x:
-         #     for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
-         #         setattr(d, k, getattr(d, k)[0])  # pop out of list
-         return x
-
-     def __len__(self):
-         return self.n
-
-
- class Classify(nn.Module):
-     # Classification head, i.e. x(b,c1,20,20) to x(b,c2)
-     def __init__(self, c1, c2, k=1, s=1, p=None, g=1):  # ch_in, ch_out, kernel, stride, padding, groups
-         super().__init__()
-         self.aap = nn.AdaptiveAvgPool2d(1)  # to x(b,c1,1,1)
-         self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g)  # to x(b,c2,1,1)
-         self.flat = nn.Flatten()
-
-     def forward(self, x):
-         z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1)  # cat if list
-         return self.flat(self.conv(z))  # flatten to x(b,c2)
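Taken together, DetectMultiBackend normalizes the inference backend behind a single forward(), AutoShape adds pre-processing and NMS, and Detections packages the output. A short sketch tying them together, assuming a local yolov5s.pt checkpoint and the repo's data/images, run from the repo root so models.common imports:

import torch
from models.common import AutoShape, DetectMultiBackend

model = AutoShape(DetectMultiBackend('yolov5s.pt', device=torch.device('cpu')))
results = model('data/images/zidane.jpg', size=640)  # letterbox, forward, NMS
print(results.pandas().xyxy[0])  # per-image DataFrame: xmin, ymin, xmax, ymax, confidence, class, name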
ultralytics/yolov5/models/experimental.py DELETED
@@ -1,121 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- """
- Experimental modules
- """
- import math
-
- import numpy as np
- import torch
- import torch.nn as nn
-
- from models.common import Conv
- from utils.downloads import attempt_download
-
-
- class CrossConv(nn.Module):
-     # Cross Convolution Downsample
-     def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
-         # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
-         super().__init__()
-         c_ = int(c2 * e)  # hidden channels
-         self.cv1 = Conv(c1, c_, (1, k), (1, s))
-         self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
-         self.add = shortcut and c1 == c2
-
-     def forward(self, x):
-         return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
-
-
- class Sum(nn.Module):
-     # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
-     def __init__(self, n, weight=False):  # n: number of inputs
-         super().__init__()
-         self.weight = weight  # apply weights boolean
-         self.iter = range(n - 1)  # iter object
-         if weight:
-             self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True)  # layer weights
-
-     def forward(self, x):
-         y = x[0]  # no weight
-         if self.weight:
-             w = torch.sigmoid(self.w) * 2
-             for i in self.iter:
-                 y = y + x[i + 1] * w[i]
-         else:
-             for i in self.iter:
-                 y = y + x[i + 1]
-         return y
-
-
- class MixConv2d(nn.Module):
-     # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595
-     def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):  # ch_in, ch_out, kernel, stride, ch_strategy
-         super().__init__()
-         n = len(k)  # number of convolutions
-         if equal_ch:  # equal c_ per group
-             i = torch.linspace(0, n - 1E-6, c2).floor()  # c2 indices
-             c_ = [(i == g).sum() for g in range(n)]  # intermediate channels
-         else:  # equal weight.numel() per group
-             b = [c2] + [0] * n
-             a = np.eye(n + 1, n, k=-1)
-             a -= np.roll(a, 1, axis=1)
-             a *= np.array(k) ** 2
-             a[0] = 1
-             c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b
-
-         self.m = nn.ModuleList(
-             [nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)])
-         self.bn = nn.BatchNorm2d(c2)
-         self.act = nn.SiLU()
-
-     def forward(self, x):
-         return self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
-
-
- class Ensemble(nn.ModuleList):
-     # Ensemble of models
-     def __init__(self):
-         super().__init__()
-
-     def forward(self, x, augment=False, profile=False, visualize=False):
-         y = []
-         for module in self:
-             y.append(module(x, augment, profile, visualize)[0])
-         # y = torch.stack(y).max(0)[0]  # max ensemble
-         # y = torch.stack(y).mean(0)  # mean ensemble
-         y = torch.cat(y, 1)  # nms ensemble
-         return y, None  # inference, train output
-
-
- def attempt_load(weights, map_location=None, inplace=True, fuse=True):
-     from models.yolo import Detect, Model
-
-     # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
-     model = Ensemble()
-     for w in weights if isinstance(weights, list) else [weights]:
-         ckpt = torch.load(attempt_download(w), map_location=map_location)  # load
-         ckpt = (ckpt.get('ema') or ckpt['model']).float()  # FP32 model
-         model.append(ckpt.fuse().eval() if fuse else ckpt.eval())  # fused or un-fused model in eval mode
-
-     # Compatibility updates
-     for m in model.modules():
-         t = type(m)
-         if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):
-             m.inplace = inplace  # torch 1.7.0 compatibility
-             if t is Detect:
-                 if not isinstance(m.anchor_grid, list):  # new Detect Layer compatibility
-                     delattr(m, 'anchor_grid')
-                     setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)
-         elif t is Conv:
-             m._non_persistent_buffers_set = set()  # torch 1.6.0 compatibility
-         elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):
-             m.recompute_scale_factor = None  # torch 1.11.0 compatibility
-
-     if len(model) == 1:
-         return model[-1]  # return model
-     else:
-         print(f'Ensemble created with {weights}\n')
-         for k in ['names']:
-             setattr(model, k, getattr(model[-1], k))
-         model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride  # max stride
-         return model  # return ensemble
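attempt_load() above returns the bare model for a single weights argument and an Ensemble whose forward() concatenates member outputs for a list. A sketch (checkpoints are fetched via attempt_download() if missing):

from models.experimental import attempt_load

model = attempt_load('yolov5s.pt', map_location='cpu')  # single model, fused and in eval mode
ensemble = attempt_load(['yolov5s.pt', 'yolov5m.pt'], map_location='cpu')  # NMS ensemble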
ultralytics/yolov5/models/hub/anchors.yaml DELETED
@@ -1,59 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- # Default anchors for COCO data
-
-
- # P5 -------------------------------------------------------------------------------------------------------------------
- # P5-640:
- anchors_p5_640:
-   - [10,13, 16,30, 33,23]  # P3/8
-   - [30,61, 62,45, 59,119]  # P4/16
-   - [116,90, 156,198, 373,326]  # P5/32
-
-
- # P6 -------------------------------------------------------------------------------------------------------------------
- # P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387
- anchors_p6_640:
-   - [9,11, 21,19, 17,41]  # P3/8
-   - [43,32, 39,70, 86,64]  # P4/16
-   - [65,131, 134,130, 120,265]  # P5/32
-   - [282,180, 247,354, 512,387]  # P6/64
-
- # P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792
- anchors_p6_1280:
-   - [19,27, 44,40, 38,94]  # P3/8
-   - [96,68, 86,152, 180,137]  # P4/16
-   - [140,301, 303,264, 238,542]  # P5/32
-   - [436,615, 739,380, 925,792]  # P6/64
-
- # P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187
- anchors_p6_1920:
-   - [28,41, 67,59, 57,141]  # P3/8
-   - [144,103, 129,227, 270,205]  # P4/16
-   - [209,452, 455,396, 358,812]  # P5/32
-   - [653,922, 1109,570, 1387,1187]  # P6/64
-
-
- # P7 -------------------------------------------------------------------------------------------------------------------
- # P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372
- anchors_p7_640:
-   - [11,11, 13,30, 29,20]  # P3/8
-   - [30,46, 61,38, 39,92]  # P4/16
-   - [78,80, 146,66, 79,163]  # P5/32
-   - [149,150, 321,143, 157,303]  # P6/64
-   - [257,402, 359,290, 524,372]  # P7/128
-
- # P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818
- anchors_p7_1280:
-   - [19,22, 54,36, 32,77]  # P3/8
-   - [70,83, 138,71, 75,173]  # P4/16
-   - [165,159, 148,334, 375,151]  # P5/32
-   - [334,317, 251,626, 499,474]  # P6/64
-   - [750,326, 534,814, 1079,818]  # P7/128
-
- # P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227
- anchors_p7_1920:
-   - [29,34, 81,55, 47,115]  # P3/8
-   - [105,124, 207,107, 113,259]  # P4/16
-   - [247,238, 222,500, 563,227]  # P5/32
-   - [501,476, 376,939, 749,711]  # P6/64
-   - [1126,489, 801,1222, 1618,1227]  # P7/128
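Each named set above is a plain YAML list with three w,h anchor pairs per output layer. A sketch of pulling one set out with PyYAML (the path assumes the repo layout):

import yaml

with open('models/hub/anchors.yaml', errors='ignore') as f:
    anchors = yaml.safe_load(f)['anchors_p6_1280']  # 4 output layers x 3 anchors each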
ultralytics/yolov5/models/hub/yolov3-spp.yaml DELETED
@@ -1,51 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-
- # Parameters
- nc: 80  # number of classes
- depth_multiple: 1.0  # model depth multiple
- width_multiple: 1.0  # layer channel multiple
- anchors:
-   - [10,13, 16,30, 33,23]  # P3/8
-   - [30,61, 62,45, 59,119]  # P4/16
-   - [116,90, 156,198, 373,326]  # P5/32
-
- # darknet53 backbone
- backbone:
-   # [from, number, module, args]
-   [[-1, 1, Conv, [32, 3, 1]],  # 0
-    [-1, 1, Conv, [64, 3, 2]],  # 1-P1/2
-    [-1, 1, Bottleneck, [64]],
-    [-1, 1, Conv, [128, 3, 2]],  # 3-P2/4
-    [-1, 2, Bottleneck, [128]],
-    [-1, 1, Conv, [256, 3, 2]],  # 5-P3/8
-    [-1, 8, Bottleneck, [256]],
-    [-1, 1, Conv, [512, 3, 2]],  # 7-P4/16
-    [-1, 8, Bottleneck, [512]],
-    [-1, 1, Conv, [1024, 3, 2]],  # 9-P5/32
-    [-1, 4, Bottleneck, [1024]],  # 10
-   ]
-
- # YOLOv3-SPP head
- head:
-   [[-1, 1, Bottleneck, [1024, False]],
-    [-1, 1, SPP, [512, [5, 9, 13]]],
-    [-1, 1, Conv, [1024, 3, 1]],
-    [-1, 1, Conv, [512, 1, 1]],
-    [-1, 1, Conv, [1024, 3, 1]],  # 15 (P5/32-large)
-
-    [-2, 1, Conv, [256, 1, 1]],
-    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-    [[-1, 8], 1, Concat, [1]],  # cat backbone P4
-    [-1, 1, Bottleneck, [512, False]],
-    [-1, 1, Bottleneck, [512, False]],
-    [-1, 1, Conv, [256, 1, 1]],
-    [-1, 1, Conv, [512, 3, 1]],  # 22 (P4/16-medium)
-
-    [-2, 1, Conv, [128, 1, 1]],
-    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-    [[-1, 6], 1, Concat, [1]],  # cat backbone P3
-    [-1, 1, Bottleneck, [256, False]],
-    [-1, 2, Bottleneck, [256, False]],  # 27 (P3/8-small)
-
-    [[27, 22, 15], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
-   ]
ultralytics/yolov5/models/hub/yolov3-tiny.yaml DELETED
@@ -1,41 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-
- # Parameters
- nc: 80  # number of classes
- depth_multiple: 1.0  # model depth multiple
- width_multiple: 1.0  # layer channel multiple
- anchors:
-   - [10,14, 23,27, 37,58]  # P4/16
-   - [81,82, 135,169, 344,319]  # P5/32
-
- # YOLOv3-tiny backbone
- backbone:
-   # [from, number, module, args]
-   [[-1, 1, Conv, [16, 3, 1]],  # 0
-    [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 1-P1/2
-    [-1, 1, Conv, [32, 3, 1]],
-    [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 3-P2/4
-    [-1, 1, Conv, [64, 3, 1]],
-    [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 5-P3/8
-    [-1, 1, Conv, [128, 3, 1]],
-    [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 7-P4/16
-    [-1, 1, Conv, [256, 3, 1]],
-    [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 9-P5/32
-    [-1, 1, Conv, [512, 3, 1]],
-    [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]],  # 11
-    [-1, 1, nn.MaxPool2d, [2, 1, 0]],  # 12
-   ]
-
- # YOLOv3-tiny head
- head:
-   [[-1, 1, Conv, [1024, 3, 1]],
-    [-1, 1, Conv, [256, 1, 1]],
-    [-1, 1, Conv, [512, 3, 1]],  # 15 (P5/32-large)
-
-    [-2, 1, Conv, [128, 1, 1]],
-    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-    [[-1, 8], 1, Concat, [1]],  # cat backbone P4
-    [-1, 1, Conv, [256, 3, 1]],  # 19 (P4/16-medium)
-
-    [[19, 15], 1, Detect, [nc, anchors]],  # Detect(P4, P5)
-   ]
ultralytics/yolov5/models/hub/yolov3.yaml DELETED
@@ -1,51 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-
- # Parameters
- nc: 80  # number of classes
- depth_multiple: 1.0  # model depth multiple
- width_multiple: 1.0  # layer channel multiple
- anchors:
-   - [10,13, 16,30, 33,23]  # P3/8
-   - [30,61, 62,45, 59,119]  # P4/16
-   - [116,90, 156,198, 373,326]  # P5/32
-
- # darknet53 backbone
- backbone:
-   # [from, number, module, args]
-   [[-1, 1, Conv, [32, 3, 1]],  # 0
-    [-1, 1, Conv, [64, 3, 2]],  # 1-P1/2
-    [-1, 1, Bottleneck, [64]],
-    [-1, 1, Conv, [128, 3, 2]],  # 3-P2/4
-    [-1, 2, Bottleneck, [128]],
-    [-1, 1, Conv, [256, 3, 2]],  # 5-P3/8
-    [-1, 8, Bottleneck, [256]],
-    [-1, 1, Conv, [512, 3, 2]],  # 7-P4/16
-    [-1, 8, Bottleneck, [512]],
-    [-1, 1, Conv, [1024, 3, 2]],  # 9-P5/32
-    [-1, 4, Bottleneck, [1024]],  # 10
-   ]
-
- # YOLOv3 head
- head:
-   [[-1, 1, Bottleneck, [1024, False]],
-    [-1, 1, Conv, [512, 1, 1]],
-    [-1, 1, Conv, [1024, 3, 1]],
-    [-1, 1, Conv, [512, 1, 1]],
-    [-1, 1, Conv, [1024, 3, 1]],  # 15 (P5/32-large)
-
-    [-2, 1, Conv, [256, 1, 1]],
-    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-    [[-1, 8], 1, Concat, [1]],  # cat backbone P4
-    [-1, 1, Bottleneck, [512, False]],
-    [-1, 1, Bottleneck, [512, False]],
-    [-1, 1, Conv, [256, 1, 1]],
-    [-1, 1, Conv, [512, 3, 1]],  # 22 (P4/16-medium)
-
-    [-2, 1, Conv, [128, 1, 1]],
-    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-    [[-1, 6], 1, Concat, [1]],  # cat backbone P3
-    [-1, 1, Bottleneck, [256, False]],
-    [-1, 2, Bottleneck, [256, False]],  # 27 (P3/8-small)
-
-    [[27, 22, 15], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
-   ]
ultralytics/yolov5/models/hub/yolov5-bifpn.yaml DELETED
@@ -1,48 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-
- # Parameters
- nc: 80  # number of classes
- depth_multiple: 1.0  # model depth multiple
- width_multiple: 1.0  # layer channel multiple
- anchors:
-   - [10,13, 16,30, 33,23]  # P3/8
-   - [30,61, 62,45, 59,119]  # P4/16
-   - [116,90, 156,198, 373,326]  # P5/32
-
- # YOLOv5 v6.0 backbone
- backbone:
-   # [from, number, module, args]
-   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
-    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
-    [-1, 3, C3, [128]],
-    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
-    [-1, 6, C3, [256]],
-    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
-    [-1, 9, C3, [512]],
-    [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
-    [-1, 3, C3, [1024]],
-    [-1, 1, SPPF, [1024, 5]],  # 9
-   ]
-
- # YOLOv5 v6.0 BiFPN head
- head:
-   [[-1, 1, Conv, [512, 1, 1]],
-    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
-    [-1, 3, C3, [512, False]],  # 13
-
-    [-1, 1, Conv, [256, 1, 1]],
-    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
-    [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
-
-    [-1, 1, Conv, [256, 3, 2]],
-    [[-1, 14, 6], 1, Concat, [1]],  # cat P4 <--- BiFPN change
-    [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
-
-    [-1, 1, Conv, [512, 3, 2]],
-    [[-1, 10], 1, Concat, [1]],  # cat head P5
-    [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
-
-    [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
-   ]
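Configs like this one are consumed by models.yolo.Model, the same constructor _create() falls back to in hubconf.py above. A sketch, assuming the repo layout so the import resolves:

from models.yolo import Model

model = Model('models/hub/yolov5-bifpn.yaml', ch=3, nc=80)  # parse the YAML, build backbone + BiFPN head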
ultralytics/yolov5/models/hub/yolov5-fpn.yaml DELETED
@@ -1,42 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-
- # Parameters
- nc: 80  # number of classes
- depth_multiple: 1.0  # model depth multiple
- width_multiple: 1.0  # layer channel multiple
- anchors:
-   - [10,13, 16,30, 33,23]  # P3/8
-   - [30,61, 62,45, 59,119]  # P4/16
-   - [116,90, 156,198, 373,326]  # P5/32
-
- # YOLOv5 v6.0 backbone
- backbone:
-   # [from, number, module, args]
-   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
-    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
-    [-1, 3, C3, [128]],
-    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
-    [-1, 6, C3, [256]],
-    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
-    [-1, 9, C3, [512]],
-    [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
-    [-1, 3, C3, [1024]],
-    [-1, 1, SPPF, [1024, 5]],  # 9
-   ]
-
- # YOLOv5 v6.0 FPN head
- head:
-   [[-1, 3, C3, [1024, False]],  # 10 (P5/32-large)
-
-    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
-    [-1, 1, Conv, [512, 1, 1]],
-    [-1, 3, C3, [512, False]],  # 14 (P4/16-medium)
-
-    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
-    [-1, 1, Conv, [256, 1, 1]],
-    [-1, 3, C3, [256, False]],  # 18 (P3/8-small)
-
-    [[18, 14, 10], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
-   ]
ultralytics/yolov5/models/hub/yolov5-p2.yaml DELETED
@@ -1,54 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-
- # Parameters
- nc: 80 # number of classes
- depth_multiple: 1.0 # model depth multiple
- width_multiple: 1.0 # layer channel multiple
- anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
-
- # YOLOv5 v6.0 backbone
- backbone:
- # [from, number, module, args]
- [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
- [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
- [-1, 3, C3, [128]],
- [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
- [-1, 6, C3, [256]],
- [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
- [-1, 9, C3, [512]],
- [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
- [-1, 3, C3, [1024]],
- [-1, 1, SPPF, [1024, 5]], # 9
- ]
-
- # YOLOv5 v6.0 head with (P2, P3, P4, P5) outputs
- head:
- [[-1, 1, Conv, [512, 1, 1]],
- [-1, 1, nn.Upsample, [None, 2, 'nearest']],
- [[-1, 6], 1, Concat, [1]], # cat backbone P4
- [-1, 3, C3, [512, False]], # 13
-
- [-1, 1, Conv, [256, 1, 1]],
- [-1, 1, nn.Upsample, [None, 2, 'nearest']],
- [[-1, 4], 1, Concat, [1]], # cat backbone P3
- [-1, 3, C3, [256, False]], # 17 (P3/8-small)
-
- [-1, 1, Conv, [128, 1, 1]],
- [-1, 1, nn.Upsample, [None, 2, 'nearest']],
- [[-1, 2], 1, Concat, [1]], # cat backbone P2
- [-1, 1, C3, [128, False]], # 21 (P2/4-xsmall)
-
- [-1, 1, Conv, [128, 3, 2]],
- [[-1, 18], 1, Concat, [1]], # cat head P3
- [-1, 3, C3, [256, False]], # 24 (P3/8-small)
-
- [-1, 1, Conv, [256, 3, 2]],
- [[-1, 14], 1, Concat, [1]], # cat head P4
- [-1, 3, C3, [512, False]], # 27 (P4/16-medium)
-
- [-1, 1, Conv, [512, 3, 2]],
- [[-1, 10], 1, Concat, [1]], # cat head P5
- [-1, 3, C3, [1024, False]], # 30 (P5/32-large)
-
- [[21, 24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P2, P3, P4, P5)
- ]
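This variant adds an extra P2/4 output for very small objects, and anchors: 3 defers anchor shapes to AutoAnchor, which evolves three anchors per output layer at train time. A quick way to confirm the four strides, again assuming a YOLOv5 checkout on PYTHONPATH and the YAML restored:

from models.yolo import Model

model = Model('models/hub/yolov5-p2.yaml', ch=3, nc=80)
print(model.stride)  # expected: tensor([ 4.,  8., 16., 32.]) for P2, P3, P4, P5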
ultralytics/yolov5/models/hub/yolov5-p34.yaml DELETED
@@ -1,41 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-
- # Parameters
- nc: 80 # number of classes
- depth_multiple: 0.33 # model depth multiple
- width_multiple: 0.50 # layer channel multiple
- anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
-
- # YOLOv5 v6.0 backbone
- backbone:
- # [from, number, module, args]
- [ [ -1, 1, Conv, [ 64, 6, 2, 2 ] ], # 0-P1/2
- [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
- [ -1, 3, C3, [ 128 ] ],
- [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
- [ -1, 6, C3, [ 256 ] ],
- [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
- [ -1, 9, C3, [ 512 ] ],
- [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32
- [ -1, 3, C3, [ 1024 ] ],
- [ -1, 1, SPPF, [ 1024, 5 ] ], # 9
- ]
-
- # YOLOv5 v6.0 head with (P3, P4) outputs
- head:
- [ [ -1, 1, Conv, [ 512, 1, 1 ] ],
- [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
- [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
- [ -1, 3, C3, [ 512, False ] ], # 13
-
- [ -1, 1, Conv, [ 256, 1, 1 ] ],
- [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
- [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
- [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small)
-
- [ -1, 1, Conv, [ 256, 3, 2 ] ],
- [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4
- [ -1, 3, C3, [ 512, False ] ], # 20 (P4/16-medium)
-
- [ [ 17, 20 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4)
- ]
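Note that this is the one hub config shipped with small-model multipliers (depth 0.33, width 0.50), and that it drops the P5 branch entirely, keeping only P3/8 and P4/16 outputs, which can make sense when no large objects are expected. The two-scale Detect can be read straight out of the YAML, assuming the file is restored at the path below:

import yaml

with open('models/hub/yolov5-p34.yaml') as f:
    cfg = yaml.safe_load(f)
detect = cfg['head'][-1]  # [[17, 20], 1, 'Detect', ['nc', 'anchors']]
print(len(detect[0]))     # 2 output scales: P3/8 and P4/16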
ultralytics/yolov5/models/hub/yolov5-p6.yaml DELETED
@@ -1,56 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-
- # Parameters
- nc: 80 # number of classes
- depth_multiple: 1.0 # model depth multiple
- width_multiple: 1.0 # layer channel multiple
- anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
-
- # YOLOv5 v6.0 backbone
- backbone:
- # [from, number, module, args]
- [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
- [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
- [-1, 3, C3, [128]],
- [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
- [-1, 6, C3, [256]],
- [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
- [-1, 9, C3, [512]],
- [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
- [-1, 3, C3, [768]],
- [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
- [-1, 3, C3, [1024]],
- [-1, 1, SPPF, [1024, 5]], # 11
- ]
-
- # YOLOv5 v6.0 head with (P3, P4, P5, P6) outputs
- head:
- [[-1, 1, Conv, [768, 1, 1]],
- [-1, 1, nn.Upsample, [None, 2, 'nearest']],
- [[-1, 8], 1, Concat, [1]], # cat backbone P5
- [-1, 3, C3, [768, False]], # 15
-
- [-1, 1, Conv, [512, 1, 1]],
- [-1, 1, nn.Upsample, [None, 2, 'nearest']],
- [[-1, 6], 1, Concat, [1]], # cat backbone P4
- [-1, 3, C3, [512, False]], # 19
-
- [-1, 1, Conv, [256, 1, 1]],
- [-1, 1, nn.Upsample, [None, 2, 'nearest']],
- [[-1, 4], 1, Concat, [1]], # cat backbone P3
- [-1, 3, C3, [256, False]], # 23 (P3/8-small)
-
- [-1, 1, Conv, [256, 3, 2]],
- [[-1, 20], 1, Concat, [1]], # cat head P4
- [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
-
- [-1, 1, Conv, [512, 3, 2]],
- [[-1, 16], 1, Concat, [1]], # cat head P5
- [-1, 3, C3, [768, False]], # 29 (P5/32-large)
-
- [-1, 1, Conv, [768, 3, 2]],
- [[-1, 12], 1, Concat, [1]], # cat head P6
- [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
-
- [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
- ]
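Adding the P6/64 branch only pays off at higher resolutions; P6 models are conventionally trained and run around 1280 px so the coarsest grid is not vanishingly small. A sketch of the output geometry, under the same checkout-on-PYTHONPATH assumption:

import torch
from models.yolo import Model

model = Model('models/hub/yolov5-p6.yaml', ch=3, nc=80)
maps = model(torch.zeros(1, 3, 1280, 1280))   # train mode: list of four raw maps
print([m.shape[-2] for m in maps])            # [160, 80, 40, 20] -> strides 8, 16, 32, 64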
ultralytics/yolov5/models/hub/yolov5-p7.yaml DELETED
@@ -1,67 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-
- # Parameters
- nc: 80 # number of classes
- depth_multiple: 1.0 # model depth multiple
- width_multiple: 1.0 # layer channel multiple
- anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
-
- # YOLOv5 v6.0 backbone
- backbone:
- # [from, number, module, args]
- [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
- [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
- [-1, 3, C3, [128]],
- [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
- [-1, 6, C3, [256]],
- [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
- [-1, 9, C3, [512]],
- [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
- [-1, 3, C3, [768]],
- [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
- [-1, 3, C3, [1024]],
- [-1, 1, Conv, [1280, 3, 2]], # 11-P7/128
- [-1, 3, C3, [1280]],
- [-1, 1, SPPF, [1280, 5]], # 13
- ]
-
- # YOLOv5 v6.0 head with (P3, P4, P5, P6, P7) outputs
- head:
- [[-1, 1, Conv, [1024, 1, 1]],
- [-1, 1, nn.Upsample, [None, 2, 'nearest']],
- [[-1, 10], 1, Concat, [1]], # cat backbone P6
- [-1, 3, C3, [1024, False]], # 17
-
- [-1, 1, Conv, [768, 1, 1]],
- [-1, 1, nn.Upsample, [None, 2, 'nearest']],
- [[-1, 8], 1, Concat, [1]], # cat backbone P5
- [-1, 3, C3, [768, False]], # 21
-
- [-1, 1, Conv, [512, 1, 1]],
- [-1, 1, nn.Upsample, [None, 2, 'nearest']],
- [[-1, 6], 1, Concat, [1]], # cat backbone P4
- [-1, 3, C3, [512, False]], # 25
-
- [-1, 1, Conv, [256, 1, 1]],
- [-1, 1, nn.Upsample, [None, 2, 'nearest']],
- [[-1, 4], 1, Concat, [1]], # cat backbone P3
- [-1, 3, C3, [256, False]], # 29 (P3/8-small)
-
- [-1, 1, Conv, [256, 3, 2]],
- [[-1, 26], 1, Concat, [1]], # cat head P4
- [-1, 3, C3, [512, False]], # 32 (P4/16-medium)
-
- [-1, 1, Conv, [512, 3, 2]],
- [[-1, 22], 1, Concat, [1]], # cat head P5
- [-1, 3, C3, [768, False]], # 35 (P5/32-large)
-
- [-1, 1, Conv, [768, 3, 2]],
- [[-1, 18], 1, Concat, [1]], # cat head P6
- [-1, 3, C3, [1024, False]], # 38 (P6/64-xlarge)
-
- [-1, 1, Conv, [1024, 3, 2]],
- [[-1, 14], 1, Concat, [1]], # cat head P7
- [-1, 3, C3, [1280, False]], # 41 (P7/128-xxlarge)
-
- [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6, P7)
- ]
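With five outputs the largest stride is 128, so input sides must be multiples of 128 (the repo's image-size check normally rounds a requested size up for you). The grid each Detect output would see at, say, a 1536-px input is simple arithmetic:

strides = [8, 16, 32, 64, 128]        # P3 .. P7
print([1536 // s for s in strides])   # [192, 96, 48, 24, 12] cells per side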
ultralytics/yolov5/models/hub/yolov5-panet.yaml DELETED
@@ -1,48 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-
- # Parameters
- nc: 80 # number of classes
- depth_multiple: 1.0 # model depth multiple
- width_multiple: 1.0 # layer channel multiple
- anchors:
- - [10,13, 16,30, 33,23] # P3/8
- - [30,61, 62,45, 59,119] # P4/16
- - [116,90, 156,198, 373,326] # P5/32
-
- # YOLOv5 v6.0 backbone
- backbone:
- # [from, number, module, args]
- [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
- [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
- [-1, 3, C3, [128]],
- [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
- [-1, 6, C3, [256]],
- [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
- [-1, 9, C3, [512]],
- [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
- [-1, 3, C3, [1024]],
- [-1, 1, SPPF, [1024, 5]], # 9
- ]
-
- # YOLOv5 v6.0 PANet head
- head:
- [[-1, 1, Conv, [512, 1, 1]],
- [-1, 1, nn.Upsample, [None, 2, 'nearest']],
- [[-1, 6], 1, Concat, [1]], # cat backbone P4
- [-1, 3, C3, [512, False]], # 13
-
- [-1, 1, Conv, [256, 1, 1]],
- [-1, 1, nn.Upsample, [None, 2, 'nearest']],
- [[-1, 4], 1, Concat, [1]], # cat backbone P3
- [-1, 3, C3, [256, False]], # 17 (P3/8-small)
-
- [-1, 1, Conv, [256, 3, 2]],
- [[-1, 14], 1, Concat, [1]], # cat head P4
- [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
-
- [-1, 1, Conv, [512, 3, 2]],
- [[-1, 10], 1, Concat, [1]], # cat head P5
- [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
-
- [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
- ]
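This last config appears structurally identical to the default YOLOv5 v6.0 head (top-down FPN followed by a bottom-up PAN path), kept under hub/ as an explicitly named reference point. A final sanity check on the module count, assuming the file is restored:

import yaml

with open('models/hub/yolov5-panet.yaml') as f:
    cfg = yaml.safe_load(f)
print(len(cfg['backbone']), len(cfg['head']))  # 10 + 15 modules -> layers 0..24, Detect last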