Add pre-commit CI actions (#4982)
* define pre-commit
* add CI code
* configure
* apply pre-commit
* fstring
* apply MD
* pre-commit
* Update torch_utils.py
* Update print strings
* notes
* Cleanup code-format.yml
* Update setup.cfg
* Update .pre-commit-config.yaml
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
- .github/ISSUE_TEMPLATE/feature-request.md +1 -1
- .github/workflows/ci-testing.yml +1 -1
- .github/workflows/code-format.yml +47 -0
- .github/workflows/codeql-analysis.yml +1 -1
- .github/workflows/greetings.yml +0 -1
- .gitignore +1 -0
- .pre-commit-config.yaml +67 -0
- LICENSE +1 -1
- README.md +5 -5
- data/Objects365.yaml +6 -6
- data/coco128.yaml +1 -1
- data/hyps/hyp.scratch-high.yaml +1 -1
- data/hyps/hyp.scratch-low.yaml +1 -1
- models/common.py +5 -5
- models/experimental.py +0 -1
- models/hub/yolov5-bifpn.yaml +1 -1
- models/tf.py +20 -20
- models/yolo.py +5 -5
- setup.cfg +45 -0
- tutorial.ipynb +1 -1
- utils/datasets.py +12 -12
- utils/general.py +2 -2
- utils/google_app_engine/app.yaml +1 -1
- utils/loggers/__init__.py +1 -1
- utils/loggers/wandb/README.md +16 -16
- utils/loggers/wandb/sweep.yaml +5 -5
- utils/loggers/wandb/wandb_utils.py +16 -14
- utils/loss.py +3 -3
- utils/plots.py +4 -4
- utils/torch_utils.py +3 -3
.github/ISSUE_TEMPLATE/feature-request.md
CHANGED
@@ -13,7 +13,7 @@ assignees: ''

## Motivation

- <!-- Please outline the motivation for the proposal. Is your feature request related to a problem?
+ <!-- Please outline the motivation for the proposal. Is your feature request related to a problem?
e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too -->

## Pitch
.github/workflows/ci-testing.yml
CHANGED
@@ -83,7 +83,7 @@ jobs:
# Python
python - <<EOF
import torch
- # Known issue, urllib.error.HTTPError: HTTP Error 403: rate limit exceeded, will be resolved in torch==1.10.0
+ # Known issue, urllib.error.HTTPError: HTTP Error 403: rate limit exceeded, will be resolved in torch==1.10.0
# model = torch.hub.load('ultralytics/yolov5', 'custom', path='runs/train/exp/weights/last.pt')
EOF

.github/workflows/code-format.yml
ADDED
@@ -0,0 +1,47 @@
# Run code formatting GitHub Action, can be replaced by this bot: https://github.com/marketplace/pre-commit-ci

name: Code formatting

on:  # https://help.github.com/en/actions/reference/events-that-trigger-workflows
  push:
    branches: [master]
  pull_request: {}

jobs:
  pep8-check-flake8:
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@master
      - uses: actions/setup-python@v2
        with:
          python-version: 3.7
      - name: Install dependencies
        run: |
          pip install flake8
          pip list
        shell: bash
      - name: PEP8
        run: |
          flake8 .

  pre-commit-check:
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v2
        # for private repo - first is the checkout step, which needs to use unlimited fetch depth for pushing
        with:
          fetch-depth: 0
      - uses: actions/setup-python@v2

      - name: set PY
        run: echo "PY=$(python -VV | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV
      - uses: actions/cache@v2
        with:
          path: ~/.cache/pre-commit
          key: pre-commit|${{ env.PY }}|${{ hashFiles('.pre-commit-config.yaml') }}

      - uses: pre-commit/action@v2.0.3
        # this action also provides an additional behaviour when used in private repositories
        # when configured with a github token, the action will push back fixes to the pull request branch
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/codeql-analysis.yml
CHANGED
@@ -1,4 +1,4 @@
- # This action runs GitHub's industry-leading static analysis engine, CodeQL, against a repository's source code to find security vulnerabilities.
+ # This action runs GitHub's industry-leading static analysis engine, CodeQL, against a repository's source code to find security vulnerabilities.
# https://github.com/github/codeql-action

name: "CodeQL"
.github/workflows/greetings.yml
CHANGED
@@ -57,4 +57,3 @@ jobs:
<a href="https://github.com/ultralytics/yolov5/actions"><img src="https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg" alt="CI CPU testing"></a>

If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.
-
.gitignore
CHANGED
@@ -20,6 +20,7 @@
*.data
*.json
*.cfg
+ !setup.cfg
!cfg/yolov3*.cfg

storage.googleapis.com
.pre-commit-config.yaml
ADDED
@@ -0,0 +1,67 @@
# Define hooks for code formations
# Will be applied on any updated commit files if a user has installed and linked commit hook

default_language_version:
  python: python3.8

# Define bot property if installed via https://github.com/marketplace/pre-commit-ci
ci:
  autofix_prs: true
  autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
  autoupdate_schedule: quarterly
  # submodules: true

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.0.1
    hooks:
      - id: end-of-file-fixer
      - id: trailing-whitespace
      - id: check-case-conflict
      - id: check-yaml
      - id: check-toml
      - id: pretty-format-json
      - id: check-docstring-first

  - repo: https://github.com/asottile/pyupgrade
    rev: v2.23.1
    hooks:
      - id: pyupgrade
        args: [--py36-plus]
        name: Upgrade code

  # TODO
  #- repo: https://github.com/PyCQA/isort
  #  rev: 5.9.3
  #  hooks:
  #    - id: isort
  #      name: imports

  # TODO
  #- repo: https://github.com/pre-commit/mirrors-yapf
  #  rev: v0.31.0
  #  hooks:
  #    - id: yapf
  #      name: formatting

  # TODO
  #- repo: https://github.com/executablebooks/mdformat
  #  rev: 0.7.7
  #  hooks:
  #    - id: mdformat
  #      additional_dependencies:
  #        - mdformat-gfm
  #        - mdformat-black
  #        - mdformat_frontmatter

  # TODO
  #- repo: https://github.com/asottile/yesqa
  #  rev: v1.2.3
  #  hooks:
  #    - id: yesqa

  - repo: https://github.com/PyCQA/flake8
    rev: 3.9.2
    hooks:
      - id: flake8
        name: PEP8
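The pyupgrade hook above runs with `--py36-plus` and drives most of the Python changes in the file diffs that follow: legacy constructs are rewritten to their modern Python 3 equivalents. A minimal sketch of the kinds of rewrites it applies (the class and strings here are hypothetical examples, not code from this repository):

```python
# Before: Python 2 era idioms that pyupgrade --py36-plus rewrites
class Loader(object):                      # explicit 'object' base class
    def __init__(self, path):
        super(Loader, self).__init__()     # two-argument super()
        self.msg = 'loading {}'.format(path)  # str.format()

# After: the modernized equivalents
class Loader:
    def __init__(self, path):
        super().__init__()                 # zero-argument super()
        self.msg = f'loading {path}'       # f-string
```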
LICENSE
CHANGED
@@ -671,4 +671,4 @@ into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
- <http://www.gnu.org/philosophy/why-not-lgpl.html>.
+ <http://www.gnu.org/philosophy/why-not-lgpl.html>.
README.md
CHANGED
@@ -46,7 +46,7 @@ YOLOv5 🚀 is a family of object detection architectures and models pretrained
open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.
</p>

- <!--
+ <!--
<a align="center" href="https://ultralytics.com/yolov5" target="_blank">
<img width="800" src="https://github.com/ultralytics/yolov5/releases/download/v1.0/banner-api.png"></a>
-->
@@ -109,7 +109,7 @@ the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and

```bash
$ python detect.py --source 0 # webcam
- file.jpg # image
+ file.jpg # image
file.mp4 # video
path/ # directory
path/*.jpg # glob
@@ -136,7 +136,7 @@ $ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size

<img width="800" src="https://user-images.githubusercontent.com/26833433/90222759-949d8800-ddc1-11ea-9fa1-1c97eed2b963.png">

- </details>
+ </details>

<details open>
<summary>Tutorials</summary>
@@ -178,7 +178,7 @@ Get started in seconds with our verified environments. Click each icon below for
<a href="https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart">
<img src="https://github.com/ultralytics/yolov5/releases/download/v1.0/logo-gcp-small.png" width="15%"/>
</a>
- </div>
+ </div>

## <div align="center">Integrations</div>

@@ -239,7 +239,7 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi
|[YOLOv5s6][assets] |1280 |44.5 |63.0 |385 |8.2 |3.6 |16.8 |12.6
|[YOLOv5m6][assets] |1280 |51.0 |69.0 |887 |11.1 |6.8 |35.7 |50.0
|[YOLOv5l6][assets] |1280 |53.6 |71.6 |1784 |15.8 |10.5 |76.8 |111.4
- |[YOLOv5x6][assets]<br>+ [TTA][TTA]|1280<br>1536 |54.7<br>**55.4** |**72.4**<br>72.3 |3136<br>- |26.2<br>- |19.4<br>- |140.7<br>- |209.8<br>-
+ |[YOLOv5x6][assets]<br>+ [TTA][TTA]|1280<br>1536 |54.7<br>**55.4** |**72.4**<br>72.3 |3136<br>- |26.2<br>- |19.4<br>- |140.7<br>- |209.8<br>-

<details>
<summary>Table Notes (click to expand)</summary>
data/Objects365.yaml
CHANGED
@@ -62,21 +62,21 @@ names: ['Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Gla
download: |
from pycocotools.coco import COCO
from tqdm import tqdm
-
+
from utils.general import Path, download, np, xyxy2xywhn
-
+
# Make Directories
dir = Path(yaml['path']) # dataset root dir
for p in 'images', 'labels':
(dir / p).mkdir(parents=True, exist_ok=True)
for q in 'train', 'val':
(dir / p / q).mkdir(parents=True, exist_ok=True)
-
+
# Train, Val Splits
for split, patches in [('train', 50 + 1), ('val', 43 + 1)]:
print(f"Processing {split} in {patches} patches ...")
images, labels = dir / 'images' / split, dir / 'labels' / split
-
+
# Download
url = f"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/"
if split == 'train':
@@ -86,11 +86,11 @@ download: |
download([f'{url}zhiyuan_objv2_{split}.json'], dir=dir, delete=False) # annotations json
download([f'{url}images/v1/patch{i}.tar.gz' for i in range(15 + 1)], dir=images, curl=True, delete=False, threads=8)
download([f'{url}images/v2/patch{i}.tar.gz' for i in range(16, patches)], dir=images, curl=True, delete=False, threads=8)
-
+
# Move
for f in tqdm(images.rglob('*.jpg'), desc=f'Moving {split} images'):
f.rename(images / f.name) # move to /images/{split}
-
+
# Labels
coco = COCO(dir / f'zhiyuan_objv2_{split}.json')
names = [x["name"] for x in coco.loadCats(coco.getCatIds())]
data/coco128.yaml
CHANGED
@@ -27,4 +27,4 @@ names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 't


# Download script/URL (optional)
- download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip
+ download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip
data/hyps/hyp.scratch-high.yaml
CHANGED
@@ -31,4 +31,4 @@ flipud: 0.0 # image flip up-down (probability)
fliplr: 0.5 # image flip left-right (probability)
mosaic: 1.0 # image mosaic (probability)
mixup: 0.1 # image mixup (probability)
- copy_paste: 0.1 # segment copy-paste (probability)
+ copy_paste: 0.1 # segment copy-paste (probability)
data/hyps/hyp.scratch-low.yaml
CHANGED
@@ -31,4 +31,4 @@ flipud: 0.0 # image flip up-down (probability)
fliplr: 0.5 # image flip left-right (probability)
mosaic: 1.0 # image mosaic (probability)
mixup: 0.0 # image mixup (probability)
- copy_paste: 0.0 # segment copy-paste (probability)
+ copy_paste: 0.0 # segment copy-paste (probability)
models/common.py
CHANGED
@@ -79,7 +79,7 @@ class TransformerBlock(nn.Module):
if c1 != c2:
self.conv = Conv(c1, c2)
self.linear = nn.Linear(c2, c2) # learnable position embedding
- self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)])
+ self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))
self.c2 = c2

def forward(self, x):
@@ -114,7 +114,7 @@ class BottleneckCSP(nn.Module):
self.cv4 = Conv(2 * c_, c2, 1, 1)
self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
self.act = nn.LeakyReLU(0.1, inplace=True)
- self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
+ self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))

def forward(self, x):
y1 = self.cv3(self.m(self.cv1(x)))
@@ -130,7 +130,7 @@ class C3(nn.Module):
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c1, c_, 1, 1)
self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2)
- self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
+ self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
# self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])

def forward(self, x):
@@ -158,7 +158,7 @@ class C3Ghost(C3):
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e) # hidden channels
- self.m = nn.Sequential(*[GhostBottleneck(c_, c_) for _ in range(n)])
+ self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))


class SPP(nn.Module):
@@ -362,7 +362,7 @@ class Detections:
def __init__(self, imgs, pred, files, times=None, names=None, shape=None):
super().__init__()
d = pred[0].device # device
- gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations
+ gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1., 1.], device=d) for im in imgs] # normalizations
self.imgs = imgs # list of images as numpy arrays
self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
self.names = names # class names
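The recurring edit in models/common.py swaps the list comprehension that used to be unpacked into `nn.Sequential` for a generator expression. Both forms build the same module stack; the generator version just skips the intermediate list. A small sketch of the equivalence (layer sizes here are arbitrary):

```python
import torch.nn as nn

n = 3
# Old style: build a list, then unpack it into nn.Sequential
seq_from_list = nn.Sequential(*[nn.Linear(8, 8) for _ in range(n)])

# New style: unpack a generator expression directly
seq_from_gen = nn.Sequential(*(nn.Linear(8, 8) for _ in range(n)))

assert len(seq_from_list) == len(seq_from_gen) == n  # same structure either way
```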
models/experimental.py
CHANGED
@@ -97,7 +97,6 @@ def attempt_load(weights, map_location=None, inplace=True, fuse=True):
else:
model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().eval()) # without layer fuse

-
# Compatibility updates
for m in model.modules():
if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]:
models/hub/yolov5-bifpn.yaml
CHANGED
@@ -18,7 +18,7 @@ backbone:
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
[-1, 9, C3, [256]],
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
- [-1, 9, C3, [512]]
+ [-1, 9, C3, [512]],
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
[-1, 1, SPP, [1024, [5, 9, 13]]],
[-1, 3, C3, [1024, False]], # 9
models/tf.py
CHANGED
@@ -40,7 +40,7 @@ LOGGER = logging.getLogger(__name__)
class TFBN(keras.layers.Layer):
# TensorFlow BatchNormalization wrapper
def __init__(self, w=None):
- super(TFBN, self).__init__()
+ super().__init__()
self.bn = keras.layers.BatchNormalization(
beta_initializer=keras.initializers.Constant(w.bias.numpy()),
gamma_initializer=keras.initializers.Constant(w.weight.numpy()),
@@ -54,7 +54,7 @@ class TFBN(keras.layers.Layer):

class TFPad(keras.layers.Layer):
def __init__(self, pad):
- super(TFPad, self).__init__()
+ super().__init__()
self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])

def call(self, inputs):
@@ -65,7 +65,7 @@ class TFConv(keras.layers.Layer):
# Standard convolution
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
# ch_in, ch_out, weights, kernel, stride, padding, groups
- super(TFConv, self).__init__()
+ super().__init__()
assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
assert isinstance(k, int), "Convolution with multiple kernels are not allowed."
# TensorFlow convolution padding is inconsistent with PyTorch (e.g. k=3 s=2 'SAME' padding)
@@ -96,7 +96,7 @@ class TFFocus(keras.layers.Layer):
# Focus wh information into c-space
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
# ch_in, ch_out, kernel, stride, padding, groups
- super(TFFocus, self).__init__()
+ super().__init__()
self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv)

def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c)
@@ -110,7 +110,7 @@ class TFFocus(keras.layers.Layer):
class TFBottleneck(keras.layers.Layer):
# Standard bottleneck
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, shortcut, groups, expansion
- super(TFBottleneck, self).__init__()
+ super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2)
@@ -123,7 +123,7 @@ class TFBottleneck(keras.layers.Layer):
class TFConv2d(keras.layers.Layer):
# Substitution for PyTorch nn.Conv2D
def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):
- super(TFConv2d, self).__init__()
+ super().__init__()
assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
self.conv = keras.layers.Conv2D(
c2, k, s, 'VALID', use_bias=bias,
@@ -138,7 +138,7 @@ class TFBottleneckCSP(keras.layers.Layer):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
# ch_in, ch_out, number, shortcut, groups, expansion
- super(TFBottleneckCSP, self).__init__()
+ super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2)
@@ -158,7 +158,7 @@ class TFC3(keras.layers.Layer):
# CSP Bottleneck with 3 convolutions
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
# ch_in, ch_out, number, shortcut, groups, expansion
- super(TFC3, self).__init__()
+ super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)
@@ -172,7 +172,7 @@ class TFC3(keras.layers.Layer):
class TFSPP(keras.layers.Layer):
# Spatial pyramid pooling layer used in YOLOv3-SPP
def __init__(self, c1, c2, k=(5, 9, 13), w=None):
- super(TFSPP, self).__init__()
+ super().__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2)
@@ -186,7 +186,7 @@ class TFSPP(keras.layers.Layer):
class TFSPPF(keras.layers.Layer):
# Spatial pyramid pooling-Fast layer
def __init__(self, c1, c2, k=5, w=None):
- super(TFSPPF, self).__init__()
+ super().__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2)
@@ -201,7 +201,7 @@ class TFSPPF(keras.layers.Layer):

class TFDetect(keras.layers.Layer):
def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): # detection layer
- super(TFDetect, self).__init__()
+ super().__init__()
self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32)
self.nc = nc # number of classes
self.no = nc + 5 # number of outputs per anchor
@@ -249,7 +249,7 @@ class TFDetect(keras.layers.Layer):

class TFUpsample(keras.layers.Layer):
def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w'
- super(TFUpsample, self).__init__()
+ super().__init__()
assert scale_factor == 2, "scale_factor must be 2"
self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode)
# self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode)
@@ -263,7 +263,7 @@ class TFUpsample(keras.layers.Layer):

class TFConcat(keras.layers.Layer):
def __init__(self, dimension=1, w=None):
- super(TFConcat, self).__init__()
+ super().__init__()
assert dimension == 1, "convert only NCHW to NHWC concat"
self.d = 3

@@ -272,7 +272,7 @@ class TFConcat(keras.layers.Layer):


def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3)
- LOGGER.info(
+ LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}")
anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
@@ -299,7 +299,7 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3)
elif m is nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
- c2 = sum([ch[-1 if x == -1 else x + 1] for x in f])
+ c2 = sum(ch[-1 if x == -1 else x + 1] for x in f)
elif m is Detect:
args.append([ch[x + 1] for x in f])
if isinstance(args[1], int): # number of anchors
@@ -312,11 +312,11 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3)
m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \
else tf_m(*args, w=model.model[i]) # module

- torch_m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module
+ torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module
t = str(m)[8:-2].replace('__main__.', '') # module type
- np = sum([x.numel() for x in torch_m_.parameters()]) # number params
+ np = sum(x.numel() for x in torch_m_.parameters()) # number params
m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
- LOGGER.info('
+ LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10} {t:<40}{str(args):<30}') # print
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
layers.append(m_)
ch.append(c2)
@@ -325,7 +325,7 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3)

class TFModel:
def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes
- super(TFModel, self).__init__()
+ super().__init__()
if isinstance(cfg, dict):
self.yaml = cfg # model dict
else: # is *.yaml
@@ -336,7 +336,7 @@ class TFModel:

# Define model
if nc and nc != self.yaml['nc']:
- print(
+ print(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}")
self.yaml['nc'] = nc # override yaml value
self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz)
models/yolo.py
CHANGED
@@ -247,7 +247,7 @@ class Model(nn.Module):


def parse_model(d, ch): # model_dict, input_channels(3)
- LOGGER.info(
+ LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}")
anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
@@ -275,7 +275,7 @@ def parse_model(d, ch): # model_dict, input_channels(3)
elif m is nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
- c2 = sum([ch[x] for x in f])
+ c2 = sum(ch[x] for x in f)
elif m is Detect:
args.append([ch[x] for x in f])
if isinstance(args[1], int): # number of anchors
@@ -287,11 +287,11 @@ def parse_model(d, ch): # model_dict, input_channels(3)
else:
c2 = ch[f]

- m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module
+ m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module
t = str(m)[8:-2].replace('__main__.', '') # module type
- np = sum([x.numel() for x in m_.parameters()]) # number params
+ np = sum(x.numel() for x in m_.parameters()) # number params
m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
- LOGGER.info('
+ LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
layers.append(m_)
if i == 0:
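The rewritten `LOGGER.info` calls in `parse_model` use f-string alignment specifiers (`>` for right-aligned, `<` for left-aligned, followed by a field width) so the header row and the per-layer rows share the same column widths. A standalone illustration of the same formatting, with made-up values:

```python
# Right-/left-aligned fields keep the header and rows in matching columns
i, f, n, np_, t, args = 0, -1, 1, 3520, 'models.common.Conv', [3, 32, 6, 2, 2]
print(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}")
print(f'{i:>3}{str(f):>18}{str(n):>3}{np_:>10} {t:<40}{str(args):<30}')
```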
setup.cfg
ADDED
@@ -0,0 +1,45 @@
# Project-wide configuration file, can be used for package metadata and other toll configurations
# Example usage: global configuration for PEP8 (via flake8) setting or default pytest arguments

[metadata]
license_file = LICENSE
description-file = README.md


[tool:pytest]
norecursedirs =
    .git
    dist
    build
addopts =
    --doctest-modules
    --durations=25
    --color=yes


[flake8]
max-line-length = 120
exclude = .tox,*.egg,build,temp
select = E,W,F
doctests = True
verbose = 2
# https://pep8.readthedocs.io/en/latest/intro.html#error-codes
format = pylint
# see: https://www.flake8rules.com/
ignore =
    E731  # Do not assign a lambda expression, use a def
    F405
    E402
    F841
    E741
    F821
    E722
    F401
    W504
    E127
    W504
    E231
    E501
    F403
    E302
    F541
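One detail worth noting in the `[flake8]` block: E731 (assigning a lambda expression instead of defining a function) is on the ignore list, which keeps flake8 quiet about lines like the `self.upsample = lambda x: ...` assignment in models/tf.py above. A quick sketch of the two styles the rule distinguishes (names here are illustrative only):

```python
# Flagged by flake8 as E731, but ignored by this project's setup.cfg
double = lambda x: x * 2

# The form E731 would otherwise ask for
def double_fn(x):
    return x * 2

assert double(3) == double_fn(3) == 6
```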
tutorial.ipynb
CHANGED
@@ -1014,4 +1014,4 @@
"outputs": []
}
]
- }
+ }
utils/datasets.py
CHANGED
@@ -140,7 +140,7 @@ class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
yield next(self.iterator)


- class _RepeatSampler(object):
+ class _RepeatSampler:
""" Sampler that repeats forever

Args:
@@ -287,7 +287,7 @@ class LoadStreams:
self.stride = stride

if os.path.isfile(sources):
- with open(sources, 'r') as f:
+ with open(sources) as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
@@ -398,14 +398,14 @@ class LoadImagesAndLabels(Dataset):
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
# f = list(p.rglob('*.*')) # pathlib
elif p.is_file(): # file
- with open(p, 'r') as t:
+ with open(p) as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
# f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
else:
raise Exception(f'{prefix}{p} does not exist')
- self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS])
+ self.img_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS)
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib
assert self.img_files, f'{prefix}No images found'
except Exception as e:
@@ -681,7 +681,7 @@ def load_mosaic(self, index):
# YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
labels4, segments4 = [], []
s = self.img_size
- yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
+ yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y
indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
random.shuffle(indices)
for i, index in enumerate(indices):
@@ -767,7 +767,7 @@ def load_mosaic9(self, index):
c = s - w, s + h0 - hp - h, s, s + h0 - hp

padx, pady = c[:2]
- x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords
+ x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords

# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
@@ -782,7 +782,7 @@ def load_mosaic9(self, index):
hp, wp = h, w # height, width previous

# Offset
- yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y
+ yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]

# Concat/clip labels
@@ -838,7 +838,7 @@ def extract_boxes(path='../datasets/coco128'): # from utils.datasets import *;
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
- with open(lb_file, 'r') as f:
+ with open(lb_file) as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels

for j, x in enumerate(lb):
@@ -866,7 +866,7 @@ def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annota
annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
- files = sorted([x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS]) # image files only
+ files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only
n = len(files) # number of files
random.seed(0) # for reproducibility
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
@@ -902,7 +902,7 @@ def verify_image_label(args):
# verify labels
if os.path.isfile(lb_file):
nf = 1 # label found
- with open(lb_file, 'r') as f:
+ with open(lb_file) as f:
l = [x.split() for x in f.read().strip().splitlines() if len(x)]
if any([len(x) > 8 for x in l]): # is segment
classes = np.array([x[0] for x in l], dtype=np.float32)
@@ -944,7 +944,7 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profil

def round_labels(labels):
# Update labels to integer class and 6 decimal place floats
- return [[int(c), *[round(x, 4) for x in points]] for c, *points in labels]
+ return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels]

def unzip(path):
# Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/'
@@ -1019,7 +1019,7 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profil
with open(file, 'w') as f:
json.dump(stats, f) # save stats *.json
t2 = time.time()
- with open(file, 'r') as f:
+ with open(file) as f:
x = json.load(f) # load hyps dict
print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
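Several of the rewritten lines above, e.g. `yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border)`, unpack a generator expression directly into tuple targets; any iterable that yields exactly as many items as there are targets works. A minimal sketch with made-up values:

```python
import random

s = 640
mosaic_border = [-320, -320]

# Tuple unpacking accepts any iterable of matching length, including a generator
yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in mosaic_border)
print(yc, xc)  # two ints drawn from [320, 960]
```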
utils/general.py
CHANGED
@@ -136,7 +136,7 @@ def is_writeable(dir, test=False):
pass
file.unlink() # remove file
return True
- except IOError:
+ except OSError:
return False
else: # method 2
return os.access(dir, os.R_OK) # possible issues on Windows
@@ -355,7 +355,7 @@ def check_dataset(data, autodownload=True):
assert 'nc' in data, "Dataset 'nc' key missing."
if 'names' not in data:
data['names'] = [f'class{i}' for i in range(data['nc'])] # assign class names if missing
- train, val, test, s = [data.get(x) for x in ('train', 'val', 'test', 'download')]
+ train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download'))
if val:
val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
if not all(x.exists() for x in val):
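The `except OSError:` handler in `is_writeable` relies on the fact that, since Python 3.3, `IOError` (and `EnvironmentError`) are simply aliases of `OSError`, and concrete I/O failures such as `FileNotFoundError` are its subclasses, so the single modern spelling covers the same errors. A quick check:

```python
# IOError and EnvironmentError are aliases of OSError in Python 3.3+
assert IOError is OSError
assert issubclass(FileNotFoundError, OSError)

try:
    open('/definitely/not/a/real/path')
except OSError as e:  # also catches what older code spelled as IOError
    print(type(e).__name__)  # FileNotFoundError
```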
utils/google_app_engine/app.yaml
CHANGED
@@ -11,4 +11,4 @@ manual_scaling:
resources:
cpu: 1
memory_gb: 4
- disk_size_gb: 20
+ disk_size_gb: 20
utils/loggers/__init__.py
CHANGED
@@ -135,7 +135,7 @@ class Loggers():
# Callback runs on training end
if plots:
plot_results(file=self.save_dir / 'results.csv') # save results.png
- files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
+ files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]
files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter

if self.tb:
utils/loggers/wandb/README.md
CHANGED
@@ -61,10 +61,10 @@ You can leverage W&B artifacts and Tables integration to easily visualize and ma
|
|
61 |
<details>
|
62 |
<summary> <b>Usage</b> </summary>
|
63 |
<b>Code</b> <code> $ python utils/logger/wandb/log_dataset.py --project ... --name ... --data .. </code>
|
64 |
-
|
65 |
![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png)
|
66 |
</details>
|
67 |
-
|
68 |
<h3> 2: Train and Log Evaluation simultaneousy </h3>
|
69 |
This is an extension of the previous section, but it'll also training after uploading the dataset. <b> This also evaluation Table</b>
|
70 |
Evaluation table compares your predictions and ground truths across the validation set for each epoch. It uses the references to the already uploaded datasets,
|
@@ -72,31 +72,31 @@ You can leverage W&B artifacts and Tables integration to easily visualize and ma
|
|
72 |
<details>
|
73 |
<summary> <b>Usage</b> </summary>
|
74 |
<b>Code</b> <code> $ python utils/logger/wandb/log_dataset.py --data .. --upload_data </code>
|
75 |
-
|
76 |
![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
|
77 |
</details>
|
78 |
-
|
79 |
<h3> 3: Train using dataset artifact </h3>
|
80 |
-
When you upload a dataset as described in the first section, you get a new config file with an added `_wandb` to its name. This file contains the information that
|
81 |
can be used to train a model directly from the dataset artifact. <b> This also logs evaluation </b>
|
82 |
<details>
|
83 |
<summary> <b>Usage</b> </summary>
|
84 |
<b>Code</b> <code> $ python utils/logger/wandb/log_dataset.py --data {data}_wandb.yaml </code>
|
85 |
-
|
86 |
![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
|
87 |
</details>
|
88 |
-
|
89 |
<h3> 4: Save model checkpoints as artifacts </h3>
|
90 |
-
To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base cammand, where `n` represents checkpoint interval.
|
91 |
You can also log both the dataset and model checkpoints simultaneously. If not passed, only the final model will be logged
|
92 |
|
93 |
<details>
|
94 |
<summary> <b>Usage</b> </summary>
|
95 |
<b>Code</b> <code> $ python train.py --save_period 1 </code>
|
96 |
-
|
97 |
![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png)
|
98 |
</details>
|
99 |
-
|
100 |
</details>
|
101 |
|
102 |
<h3> 5: Resume runs from checkpoint artifacts. </h3>
|
@@ -105,28 +105,28 @@ Any run can be resumed using artifacts if the <code>--resume</code> argument sta
|
|
105 |
<details>
|
106 |
<summary> <b>Usage</b> </summary>
|
107 |
<b>Code</b> <code> $ python train.py --resume wandb-artifact://{run_path} </code>
|
108 |
-
|
109 |
![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
|
110 |
</details>
|
111 |
-
|
112 |
<h3> 6: Resume runs from dataset artifact & checkpoint artifacts. </h3>
|
113 |
<b> Local dataset or model checkpoints are not required. This can be used to resume runs directly on a different device </b>
|
114 |
-
The syntax is same as the previous section, but you'll need to lof both the dataset and model checkpoints as artifacts, i.e, set bot <code>--upload_dataset</code> or
|
115 |
train from <code>_wandb.yaml</code> file and set <code>--save_period</code>
|
116 |
|
117 |
<details>
|
118 |
<summary> <b>Usage</b> </summary>
|
119 |
<b>Code</b> <code> $ python train.py --resume wandb-artifact://{run_path} </code>
|
120 |
-
|
121 |
![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
|
122 |
</details>
|
123 |
-
|
124 |
</details>
|
125 |
|
126 |
|
127 |
<h3> Reports </h3>
|
128 |
W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publically share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)).
|
129 |
-
|
130 |
<img width="900" alt="Weights & Biases Reports" src="https://user-images.githubusercontent.com/26833433/135394029-a17eaf86-c6c1-4b1d-bb80-b90e83aaffa7.png">
|
131 |
|
132 |
|
|
|
61 |
<details>
<summary> <b>Usage</b> </summary>
<b>Code</b> <code> $ python utils/logger/wandb/log_dataset.py --project ... --name ... --data .. </code>
+
![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png)
</details>
+
<h3> 2: Train and Log Evaluation simultaneously </h3>
This is an extension of the previous section, but it'll also start training after uploading the dataset. <b> This also logs the evaluation Table</b>
The evaluation table compares your predictions and ground truths across the validation set for each epoch. It uses the references to the already uploaded datasets.

<details>
<summary> <b>Usage</b> </summary>
<b>Code</b> <code> $ python utils/logger/wandb/log_dataset.py --data .. --upload_data </code>
+
![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
</details>
+
<h3> 3: Train using dataset artifact </h3>
+When you upload a dataset as described in the first section, you get a new config file with an added `_wandb` to its name. This file contains the information that
can be used to train a model directly from the dataset artifact. <b> This also logs evaluation </b>
<details>
<summary> <b>Usage</b> </summary>
<b>Code</b> <code> $ python utils/logger/wandb/log_dataset.py --data {data}_wandb.yaml </code>
+
![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
</details>
+
<h3> 4: Save model checkpoints as artifacts </h3>
+To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` represents the checkpoint interval.
You can also log both the dataset and model checkpoints simultaneously. If not passed, only the final model will be logged.

<details>
<summary> <b>Usage</b> </summary>
<b>Code</b> <code> $ python train.py --save_period 1 </code>
+
![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png)
</details>
+
</details>

<h3> 5: Resume runs from checkpoint artifacts. </h3>

<details>
<summary> <b>Usage</b> </summary>
<b>Code</b> <code> $ python train.py --resume wandb-artifact://{run_path} </code>
+
![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
</details>
+
<h3> 6: Resume runs from dataset artifact & checkpoint artifacts. </h3>
<b> Local dataset or model checkpoints are not required. This can be used to resume runs directly on a different device </b>
+The syntax is the same as the previous section, but you'll need to log both the dataset and model checkpoints as artifacts, i.e., set <code>--upload_dataset</code> or
train from the <code>_wandb.yaml</code> file, and set <code>--save_period</code>

<details>
<summary> <b>Usage</b> </summary>
<b>Code</b> <code> $ python train.py --resume wandb-artifact://{run_path} </code>
+
![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
</details>
+
</details>


<h3> Reports </h3>
W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publicly share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)).
+
<img width="900" alt="Weights & Biases Reports" src="https://user-images.githubusercontent.com/26833433/135394029-a17eaf86-c6c1-4b1d-bb80-b90e83aaffa7.png">

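For a rough picture of what `--save_period n` (section 4 above) sets up behind the scenes, the logger wraps each saved checkpoint in a W&B artifact. A minimal stand-alone sketch; the file path, epoch and fitness values are invented for the example, only the `run_<id>_model` naming follows wandb_utils.py.

```python
# Minimal sketch of checkpoint-as-artifact logging; path and metadata values
# are examples, not values from this commit.
import wandb

wandb.init(project='yolov5_tutorial')
artifact = wandb.Artifact(f'run_{wandb.run.id}_model', type='model',
                          metadata={'epoch': 3, 'fitness_score': 0.42})
artifact.add_file('runs/train/exp/weights/last.pt', name='last.pt')   # attach the checkpoint file
wandb.log_artifact(artifact, aliases=['latest', 'epoch 3'])           # one new version per saved epoch
wandb.finish()
```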
utils/loggers/wandb/sweep.yaml
CHANGED
@@ -1,17 +1,17 @@
|
|
1 |
# Hyperparameters for training
-# To set range-
+# To set range-
# Provide min and max values as:
# parameter:
-#
+#
# min: scalar
# max: scalar
# OR
#
# Set a specific list of search space-
-# parameter:
+# parameter:
# values: [scalar1, scalar2, scalar3...]
-#
+#
-# You can use grid, bayesian and hyperopt search strategy
+# You can use grid, bayesian and hyperopt search strategy
# For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration

program: utils/loggers/wandb/sweep.py
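The comments above describe the two ways a search space can be declared: a min/max range or an explicit values list. A small Python equivalent for reference; the hyperparameter names, metric and method below are example choices, not the values shipped in sweep.yaml.

```python
# Illustrative W&B sweep definition; parameter names, metric and method are
# assumptions for the example only.
import wandb

sweep_config = {
    'program': 'utils/loggers/wandb/sweep.py',
    'method': 'bayes',                                         # 'grid' and 'random' also work
    'metric': {'name': 'metrics/mAP_0.5', 'goal': 'maximize'},
    'parameters': {
        'lr0': {'min': 1e-5, 'max': 1e-1},                     # min/max range
        'momentum': {'min': 0.6, 'max': 0.98},
        'batch_size': {'values': [16, 32, 64]},                # explicit list of values
    },
}

# sweep_id = wandb.sweep(sweep_config, project='yolov5_tutorial')
# wandb.agent(sweep_id)   # launches runs of `program` with sampled hyperparameters
```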
utils/loggers/wandb/wandb_utils.py
CHANGED
@@ -5,6 +5,7 @@ import os
|
|
5 |
import sys
from contextlib import contextmanager
from pathlib import Path
+from typing import Dict

import pkg_resources as pkg
import yaml

@@ -25,7 +26,7 @@ try:
assert hasattr(wandb, '__version__') # verify package import not local dir
except (ImportError, AssertionError):
wandb = None
-
+
RANK = int(os.getenv('RANK', -1))
WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'

@@ -127,7 +128,7 @@ class WandbLogger():
arguments:
opt (namespace) -- Commandline arguments for this run
run_id (str) -- Run ID of W&B run to be resumed
-job_type (str) -- To set the job_type for this run
+job_type (str) -- To set the job_type for this run

"""
# Pre-training routine --

@@ -142,7 +143,8 @@ class WandbLogger():
self.max_imgs_to_log = 16
self.wandb_artifact_data_dict = None
self.data_dict = None
-# It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call
+# It's more elegant to stick to 1 wandb.init call,
+#  but useful config data is overwritten in the WandbLogger's wandb.init call
if isinstance(opt.resume, str): # checks resume from artifact
if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
entity, project, run_id, model_artifact_name = get_run_info(opt.resume)

@@ -212,7 +214,7 @@ class WandbLogger():
Setup the necessary processes for training YOLO models:
- Attempt to download model checkpoint and dataset artifacts if opt.resume starts with WANDB_ARTIFACT_PREFIX
- Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded
-- Setup log_dict, initialize bbox_interval
+- Setup log_dict, initialize bbox_interval

arguments:
opt (namespace) -- commandline arguments for this run

@@ -301,7 +303,7 @@ class WandbLogger():
path (Path) -- Path of directory containing the checkpoints
opt (namespace) -- Command line arguments for this run
epoch (int) -- Current epoch number
-fitness_score (float) -- fitness score for current epoch
+fitness_score (float) -- fitness score for current epoch
best_model (boolean) -- Boolean representing if the current checkpoint is the best yet.
"""
model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={

@@ -325,7 +327,7 @@ class WandbLogger():
data_file (str) -- the .yaml file with information about the dataset like - path, classes etc.
single_class (boolean) -- train multi-class data as single-class
project (str) -- project name. Used to construct the artifact path
-overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new
+overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new
file with _wandb postfix. Eg -> data_wandb.yaml

returns:

@@ -371,14 +373,14 @@ class WandbLogger():
for i, data in enumerate(tqdm(self.val_table.data)):
self.val_table_path_map[data[3]] = data[0]

-def create_dataset_table(self, dataset, class_to_id, name='dataset'):
+def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int,str], name: str = 'dataset'):
"""
Create and return W&B artifact containing W&B Table of the dataset.

arguments:
-dataset
-class_to_id
-name
+dataset -- instance of LoadImagesAndLabels class used to iterate over the data to build Table
+class_to_id -- hash map that maps class ids to labels
+name -- name of the artifact

returns:
dataset artifact to be logged or used

@@ -419,7 +421,7 @@ class WandbLogger():

arguments:
predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class]
-path (str): local path of the current evaluation image
+path (str): local path of the current evaluation image
names (dict(int, str)): hash map that maps class ids to labels
"""
class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()])

@@ -430,7 +432,7 @@ class WandbLogger():
box_data.append(
{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
"class_id": int(cls),
-"box_caption": "%s %.3f" % (names[cls], conf),
+"box_caption": f"{names[cls]} {conf:.3f}",
"scores": {"class_score": conf},
"domain": "pixel"})
total_conf += conf

@@ -450,7 +452,7 @@ class WandbLogger():
arguments:
pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]
predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class]
-path (str): local path of the current evaluation image
+path (str): local path of the current evaluation image
"""
if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact
self.log_training_progress(predn, path, names)

@@ -459,7 +461,7 @@ class WandbLogger():
if self.current_epoch % self.bbox_interval == 0:
box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
"class_id": int(cls),
-"box_caption": "%s %.3f" % (names[cls], conf),
+"box_caption": f"{names[cls]} {conf:.3f}",
"scores": {"class_score": conf},
"domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
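On the typing side, the only changes in this file are the new `from typing import Dict` import and the annotated `create_dataset_table()` signature shown above. A tiny sketch of the same annotation style; the helper below is hypothetical and not part of the commit.

```python
# Hypothetical helper illustrating the Dict[int, str] annotation style used by
# the updated create_dataset_table() signature; not code from this commit.
from typing import Dict

import wandb

def build_class_table(class_to_id: Dict[int, str]) -> wandb.Table:
    table = wandb.Table(columns=['id', 'name'])
    for class_id, label in class_to_id.items():   # e.g. {0: 'person', 1: 'bicycle'}
        table.add_data(class_id, label)
    return table
```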
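On the logging side, the `box_data` / `class_labels` dictionaries assembled above are exactly the payload `wandb.Image` accepts for interactive bounding-box overlays. A self-contained sketch; the image file and the single box with its score are made up for illustration.

```python
# Stand-alone sketch of the bounding-box payload built in wandb_utils.py;
# the image path and box values are examples only.
import wandb

names = {0: 'person'}  # class_id -> label
box_data = [{'position': {'minX': 34, 'minY': 50, 'maxX': 160, 'maxY': 220},
             'class_id': 0,
             'box_caption': f'{names[0]} {0.87:.3f}',
             'scores': {'class_score': 0.87},
             'domain': 'pixel'}]
boxes = {'predictions': {'box_data': box_data, 'class_labels': names}}

wandb.init(project='yolov5_tutorial')
wandb.log({'bounding_boxes': wandb.Image('image.jpg', boxes=boxes)})
wandb.finish()
```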
utils/loss.py
CHANGED
@@ -18,7 +18,7 @@ def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#iss
|
|
18 |
class BCEBlurWithLogitsLoss(nn.Module):
# BCEwithLogitLoss() with reduced missing label effects.
def __init__(self, alpha=0.05):
-super(BCEBlurWithLogitsLoss, self).__init__()
+super().__init__()
self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss()
self.alpha = alpha

@@ -35,7 +35,7 @@ class BCEBlurWithLogitsLoss(nn.Module):
class FocalLoss(nn.Module):
# Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
-super(FocalLoss, self).__init__()
+super().__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha

@@ -65,7 +65,7 @@ class FocalLoss(nn.Module):
class QFocalLoss(nn.Module):
# Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
-super(QFocalLoss, self).__init__()
+super().__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
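The only change in this file is the move from the two-argument `super(Class, self).__init__()` form to the zero-argument Python 3 form; both spellings are equivalent here, as the toy module below illustrates (the class itself is an example, not code from the repo).

```python
# Toy nn.Module showing that the two super() spellings behave identically;
# ToyLoss is illustrative only, not part of utils/loss.py.
import torch.nn as nn

class ToyLoss(nn.Module):
    def __init__(self, alpha=0.05):
        # super(ToyLoss, self).__init__()  # pre-change spelling
        super().__init__()                 # zero-argument form kept by this commit
        self.alpha = alpha

print(ToyLoss().alpha)  # 0.05
```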
utils/plots.py
CHANGED
@@ -250,7 +250,7 @@ def plot_targets_txt(): # from utils.plots import *; plot_targets_txt()
|
|
250 |
fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
ax = ax.ravel()
for i in range(4):
-ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
+ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}')
ax[i].legend()
ax[i].set_title(s[i])
plt.savefig('targets.jpg', dpi=200)

@@ -363,7 +363,7 @@ def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
else:
a.remove()
except Exception as e:
-print('Warning: Plotting error for %s; %s' % (f, e))
+print(f'Warning: Plotting error for {f}; {e}')
ax[1].legend()
plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)

@@ -384,10 +384,10 @@ def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *;
plt.subplot(6, 5, i + 1)
plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
plt.plot(mu, f.max(), 'k+', markersize=15)
-plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters
+plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters
if i % 5 != 0:
plt.yticks([])
-print('%15s: %.3g' % (k, mu))
+print(f'{k:>15}: {mu:.3g}')
f = evolve_csv.with_suffix('.png') # filename
plt.savefig(f, dpi=200)
plt.close()
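The plots.py edits swap `%`-formatting for f-strings while carrying the format specifiers over. A quick check that the two spellings render identically; `k` and `mu` are arbitrary sample values.

```python
# Equivalence check for the %-format -> f-string rewrite shown above.
k, mu = 'lr0', 0.0032123
assert '%15s: %.3g' % (k, mu) == f'{k:>15}: {mu:.3g}'   # same width and precision
print(f'{k:>15}: {mu:.3g}')                              # '            lr0: 0.00321'
```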
utils/torch_utils.py
CHANGED
@@ -123,7 +123,7 @@ def profile(input, ops, n=10, device=None):
|
|
123 |
y = m(x)
t[1] = time_sync()
try:
-_ = (sum([yi.sum() for yi in y]) if isinstance(y, list) else y).sum().backward()
+_ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward()
t[2] = time_sync()
except Exception as e: # no backward method
# print(e) # for debug

@@ -223,7 +223,7 @@ def model_info(model, verbose=False, img_size=640):
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
if verbose:
-print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
+print(f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}")
for i, (name, p) in enumerate(model.named_parameters()):
name = name.replace('module_list.', '')
print('%5g %40s %9s %12g %20s %10.3g %10.3g' %

@@ -270,7 +270,7 @@ def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416)
s = (int(h * ratio), int(w * ratio)) # new size
img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize
if not same_shape: # pad/crop img
-h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
+h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w))
return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean

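The new `scale_img()` line rounds the padded size up to a multiple of the grid stride `gs` and unpacks the result from a generator expression instead of a list comprehension. A stand-alone numeric check; the input height/width and ratio are arbitrary sample values.

```python
# Numeric check of the grid-multiple rounding used in scale_img() above.
import math

h, w, ratio, gs = 256, 416, 0.5, 32
s = (int(h * ratio), int(w * ratio))                      # interpolated size -> (128, 208)
h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w))   # pad target, multiple of gs -> (128, 224)
print(s, (h, w), (w - s[1], h - s[0]))                    # 16 px of right padding, 0 below
```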