glenn-jocher committed
Commit 791dadb
Parent(s): d929bb6

Pycocotools best.pt after COCO train (#1616)

* Pycocotools best.pt after COCO train
* cleanup

Files changed:
- models/hub/yolov3-tiny.yaml +41 -0
- models/hub/yolov3.yaml +51 -0
- test.py +2 -3
- train.py +22 -11
- utils/google_utils.py +1 -1
models/hub/yolov3-tiny.yaml
ADDED
@@ -0,0 +1,41 @@
+# parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+
+# anchors
+anchors:
+  - [10,14, 23,27, 37,58]  # P4/16
+  - [81,82, 135,169, 344,319]  # P5/32
+
+# YOLOv3-tiny backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [16, 3, 1]],  # 0
+   [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 1-P1/2
+   [-1, 1, Conv, [32, 3, 1]],
+   [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 3-P2/4
+   [-1, 1, Conv, [64, 3, 1]],
+   [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 5-P3/8
+   [-1, 1, Conv, [128, 3, 1]],
+   [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 7-P4/16
+   [-1, 1, Conv, [256, 3, 1]],
+   [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 9-P5/32
+   [-1, 1, Conv, [512, 3, 1]],
+   [-1, 1, nn.ZeroPad2d, [0, 1, 0, 1]],  # 11
+   [-1, 1, nn.MaxPool2d, [2, 1, 0]],  # 12
+  ]
+
+# YOLOv3-tiny head
+head:
+  [[-1, 1, Conv, [1024, 3, 1]],
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, Conv, [512, 3, 1]],  # 15 (P5/32-large)
+
+   [-2, 1, Conv, [128, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 8], 1, Concat, [1]],  # cat backbone P4
+   [-1, 1, Conv, [256, 3, 1]],  # 19 (P4/16-medium)
+
+   [[19, 15], 1, Detect, [nc, anchors]],  # Detect(P4, P5)
+  ]
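Note: the hub configs are consumed by models/yolo.py, which walks the backbone and head lists to assemble the network. A minimal loading sketch, assuming the repo root as the working directory and the Model(cfg, ch, nc) constructor this codebase exposes:

import torch
from models.yolo import Model  # yolov5 model builder

# Build YOLOv3-tiny from the new hub config: 3 input channels, 80 classes
model = Model('models/hub/yolov3-tiny.yaml', ch=3, nc=80)
model.eval()

# Dry run; spatial dims must be a multiple of the maximum stride (32)
img = torch.zeros(1, 3, 416, 416)
with torch.no_grad():
    pred = model(img)[0]  # eval mode returns (detections, per-layer outputs)
print(pred.shape)  # (1, num_predictions, 85) for nc=80

The tiny variant detects at only two scales (P4/16 and P5/32), which is why anchors has two rows and Detect concatenates layers 19 and 15.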
models/hub/yolov3.yaml
ADDED
@@ -0,0 +1,51 @@
+# parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+
+# anchors
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# darknet53 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Conv, [32, 3, 1]],  # 0
+   [-1, 1, Conv, [64, 3, 2]],  # 1-P1/2
+   [-1, 1, Bottleneck, [64]],
+   [-1, 1, Conv, [128, 3, 2]],  # 3-P2/4
+   [-1, 2, Bottleneck, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 5-P3/8
+   [-1, 8, Bottleneck, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 7-P4/16
+   [-1, 8, Bottleneck, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 9-P5/32
+   [-1, 4, Bottleneck, [1024]],  # 10
+  ]
+
+# YOLOv3 head
+head:
+  [[-1, 1, Bottleneck, [1024, False]],
+   [-1, 1, Conv, [512, [1, 1]]],
+   [-1, 1, Conv, [1024, 3, 1]],
+   [-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, Conv, [1024, 3, 1]],  # 15 (P5/32-large)
+
+   [-2, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 8], 1, Concat, [1]],  # cat backbone P4
+   [-1, 1, Bottleneck, [512, False]],
+   [-1, 1, Bottleneck, [512, False]],
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, Conv, [512, 3, 1]],  # 22 (P4/16-medium)
+
+   [-2, 1, Conv, [128, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P3
+   [-1, 1, Bottleneck, [256, False]],
+   [-1, 2, Bottleneck, [256, False]],  # 27 (P3/8-small)
+
+   [[27, 22, 15], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]
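The bare names in these lists (Conv, Bottleneck, nn.Upsample) load as plain YAML strings; parse_model in models/yolo.py eval()s them into classes and scales repeat counts by depth_multiple. A sketch of inspecting the config with PyYAML (assumed installed), mirroring that scaling rule:

import yaml

with open('models/hub/yolov3.yaml') as f:
    cfg = yaml.safe_load(f)  # module names stay as strings here

print(cfg['nc'], len(cfg['anchors']))  # 80 classes, 3 anchor scales

# Repeat counts scale with depth_multiple, as in parse_model
gd = cfg['depth_multiple']
for from_, number, module, args in cfg['backbone']:
    n = max(round(number * gd), 1) if number > 1 else number
    print(from_, n, module, args)

With depth_multiple: 1.0 and width_multiple: 1.0 the config builds at its nominal darknet53 size; the multipliers only change scaled variants.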
test.py
CHANGED
@@ -1,5 +1,4 @@
 import argparse
-import glob
 import json
 import os
 from pathlib import Path

@@ -246,7 +245,7 @@ def test(data,
     # Save JSON
     if save_json and len(jdict):
         w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
-        anno_json = glob.glob('../coco/annotations/instances_val*.json')[0]  # annotations json
+        anno_json = '../coco/annotations/instances_val2017.json'  # annotations json
         pred_json = str(save_dir / f"{w}_predictions.json")  # predictions json
         print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
         with open(pred_json, 'w') as f:

@@ -266,7 +265,7 @@ def test(data,
             eval.summarize()
             map, map50 = eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
         except Exception as e:
-            print('ERROR: pycocotools unable to run: %s' % e)
+            print(f'pycocotools unable to run: {e}')

     # Return results
     if not training:
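With anno_json now a fixed path instead of a glob, the save_json branch hands the predictions file straight to pycocotools. A sketch of the evaluation this enables, using the standard pycocotools API (the same COCO/COCOeval sequence test.py runs, minus its image-ID filtering):

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

anno_json = '../coco/annotations/instances_val2017.json'  # ground truth (path from this commit)
pred_json = 'best_predictions.json'                       # detections written by test.py

anno = COCO(anno_json)          # init annotations api
pred = anno.loadRes(pred_json)  # init predictions api
ev = COCOeval(anno, pred, 'bbox')
ev.evaluate()
ev.accumulate()
ev.summarize()                  # prints the 12-metric COCO table
map, map50 = ev.stats[:2]       # mAP@0.5:0.95, mAP@0.5

The except handler still swallows failures (missing pycocotools install, missing annotations file) so evaluation problems do not abort the run; only the message formatting changed.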
train.py
CHANGED
@@ -22,6 +22,7 @@ from torch.utils.tensorboard import SummaryWriter
 from tqdm import tqdm

 import test  # import test.py to get mAP after each epoch
+from models.experimental import attempt_load
 from models.yolo import Model
 from utils.autoanchor import check_anchors
 from utils.datasets import create_dataloader

@@ -193,9 +194,9 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
     # Process 0
     if rank in [-1, 0]:
         ema.updates = start_epoch * nb // accumulate  # set EMA updates
-        testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt,
+        testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt,  # testloader
                                        hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True,
-                                       rank=-1, world_size=opt.world_size, workers=opt.workers)[0]
+                                       rank=-1, world_size=opt.world_size, workers=opt.workers, pad=0.5)[0]

         if not opt.resume:
             labels = np.concatenate(dataset.labels, 0)

@@ -385,15 +386,12 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):

     if rank in [-1, 0]:
         # Strip optimizers
-        n = opt.name if opt.name.isnumeric() else ''
-        fresults, flast, fbest = save_dir / f'results{n}.txt', wdir / f'last{n}.pt', wdir / f'best{n}.pt'
-        for f1, f2 in zip([wdir / 'last.pt', wdir / 'best.pt', results_file], [flast, fbest, fresults]):
-            if f1.exists():
-                os.rename(f1, f2)  # rename
-                if str(f2).endswith('.pt'):  # is *.pt
-                    strip_optimizer(f2)  # strip optimizer
-                    os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) if opt.bucket else None  # upload
-        # Finish
+        for f in [last, best]:
+            if f.exists():  # is *.pt
+                strip_optimizer(f)  # strip optimizer
+                os.system('gsutil cp %s gs://%s/weights' % (f, opt.bucket)) if opt.bucket else None  # upload
+
+        # Plots
         if plots:
             plot_results(save_dir=save_dir)  # save as results.png
         if wandb:

@@ -401,6 +399,19 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
             wandb.log({"Results": [wandb.Image(str(save_dir / f), caption=f) for f in files
                                    if (save_dir / f).exists()]})
         logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
+
+        # Test best.pt
+        if opt.data.endswith('coco.yaml') and nc == 80:  # if COCO
+            results, _, _ = test.test(opt.data,
+                                      batch_size=total_batch_size,
+                                      imgsz=imgsz_test,
+                                      model=attempt_load(best if best.exists() else last, device).half(),
+                                      single_cls=opt.single_cls,
+                                      dataloader=testloader,
+                                      save_dir=save_dir,
+                                      save_json=True,  # use pycocotools
+                                      plots=False)
+
     else:
         dist.destroy_process_group()
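The rewritten shutdown block drops the old rename-and-upload dance: last and best are already Path objects for the run's weight files, and each existing one is stripped in place. strip_optimizer lives in utils/general.py; the sketch below approximates its effect (the checkpoint keys are assumptions, not the exact implementation):

import torch

def strip_optimizer_sketch(f='best.pt'):
    # Approximation of utils.general.strip_optimizer: drop training state,
    # convert weights to FP16, and freeze parameters to shrink the file
    x = torch.load(f, map_location='cpu')
    for k in ('optimizer', 'training_results'):  # assumed checkpoint keys
        if k in x:
            x[k] = None
    x['epoch'] = -1  # mark training as finished
    x['model'].half()  # FP16 weights
    for p in x['model'].parameters():
        p.requires_grad = False
    torch.save(x, f)

Stripping happens before the new '# Test best.pt' block, which reloads the finalized checkpoint via attempt_load(...).half() and reruns test.test with save_json=True, so the numbers reported at the end of a COCO training now come from pycocotools rather than the in-training evaluation.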
utils/google_utils.py
CHANGED
@@ -17,7 +17,7 @@ def gsutil_getsize(url=''):
 
 def attempt_download(weights):
     # Attempt to download pretrained weights if not found locally
-    weights = weights.strip().replace("'", '')
+    weights = str(weights).strip().replace("'", '')
     file = Path(weights).name.lower()
 
     msg = weights + ' missing, try downloading from https://github.com/ultralytics/yolov5/releases/'
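The str() cast is what makes the train.py change above work: last and best are pathlib.Path objects, and they eventually reach attempt_download (via attempt_load), where Path has no .strip() method. A minimal repro of the failure the cast prevents:

from pathlib import Path

weights = Path('weights/best.pt')
# weights.strip()  # AttributeError: 'PosixPath' object has no attribute 'strip'
weights = str(weights).strip().replace("'", '')  # the fixed line: cast first
print(weights)  # weights/best.pt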