Jirka Borovec
committed on
Explicit Imports (#498)
* expand imports
* optimize
* miss
* fix
- detect.py +17 -8
- hubconf.py +2 -3
- models/common.py +3 -1
- models/experimental.py +7 -3
- models/export.py +4 -3
- models/yolo.py +17 -8
- test.py +20 -7
- train.py +20 -7
- utils/datasets.py +1 -1
- utils/{utils.py → general.py} +4 -4
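Note: the pattern is the same in every file below — each wildcard or module-qualified import is replaced by named imports, so a symbol's origin is visible at its call site and linters can actually check it. A schematic before/after (runnable only inside this repo; names taken from the diff):

    # before: one star import, helpers arrive anonymously
    #   from utils.utils import *
    #   device = torch_utils.select_device(opt.device)

    # after: each helper is bound to the module that defines it
    from utils.torch_utils import select_device

    device = select_device('')  # '' picks CUDA when available, else CPU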
detect.py
CHANGED

@@ -1,10 +1,19 @@
 import argparse
-
+import os
+import platform
+import shutil
+import time
+from pathlib import Path
+
+import cv2
+import torch
 import torch.backends.cudnn as cudnn
+from numpy import random
 
-from models.experimental import *
-from utils.datasets import *
-from utils.utils import *
+from models.experimental import attempt_load
+from utils.datasets import LoadStreams, LoadImages
+from utils.general import check_img_size, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, plot_one_box
+from utils.torch_utils import select_device, load_classifier, time_synchronized
 
 
 def detect(save_img=False):
@@ -13,7 +22,7 @@ def detect(save_img=False):
     webcam = source == '0' or source.startswith('rtsp') or source.startswith('http') or source.endswith('.txt')
 
     # Initialize
-    device = torch_utils.select_device(opt.device)
+    device = select_device(opt.device)
     if os.path.exists(out):
         shutil.rmtree(out)  # delete output folder
     os.makedirs(out)  # make new output folder
@@ -28,7 +37,7 @@ def detect(save_img=False):
     # Second-stage classifier
     classify = False
     if classify:
-        modelc = torch_utils.load_classifier(name='resnet101', n=2)  # initialize
+        modelc = load_classifier(name='resnet101', n=2)  # initialize
         modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])  # load weights
         modelc.to(device).eval()
 
@@ -58,12 +67,12 @@ def detect(save_img=False):
         img = img.unsqueeze(0)
 
         # Inference
-        t1 = torch_utils.time_synchronized()
+        t1 = time_synchronized()
         pred = model(img, augment=opt.augment)[0]
 
         # Apply NMS
         pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
-        t2 = torch_utils.time_synchronized()
+        t2 = time_synchronized()
 
         # Apply Classifier
         if classify:
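time_synchronized, now imported by name from utils.torch_utils, brackets the forward pass and the NMS call above. It is presumably the usual CUDA-aware clock; a minimal sketch, assuming the standard synchronize-then-read implementation:

    import time

    import torch


    def time_synchronized():
        # flush queued CUDA kernels first, so the wall clock measures compute, not launch overhead
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        return time.time()

Without the synchronize, asynchronous kernel launches would make t2 - t1 nearly meaningless on GPU.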
hubconf.py
CHANGED

@@ -6,13 +6,12 @@ Usage:
 """
 
 dependencies = ['torch', 'yaml']
-
 import os
 
 import torch
 
 from models.yolo import Model
-from utils import google_utils
+from utils.google_utils import attempt_download
 
 
 def create(name, pretrained, channels, classes):
@@ -32,7 +31,7 @@ def create(name, pretrained, channels, classes):
     model = Model(config, channels, classes)
     if pretrained:
         ckpt = '%s.pt' % name  # checkpoint filename
-        google_utils.attempt_download(ckpt)  # download if not found locally
+        attempt_download(ckpt)  # download if not found locally
         state_dict = torch.load(ckpt, map_location=torch.device('cpu'))['model'].float().state_dict()  # to FP32
         state_dict = {k: v for k, v in state_dict.items() if model.state_dict()[k].shape == v.shape}  # filter
         model.load_state_dict(state_dict, strict=False)  # load
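hubconf.py is the torch.hub entry point, and with attempt_download imported directly, create() can fetch a missing checkpoint before torch.load. A hypothetical hub call (the 'yolov5s' entry-point name is assumed; only create() is visible in this hunk):

    import torch

    # resolves hubconf.py in the repo and forwards kwargs to the entry point
    model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)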
models/common.py
CHANGED

@@ -1,6 +1,8 @@
 # This file contains modules common to various models
+import math
 
-from utils.utils import *
+import torch
+import torch.nn as nn
 
 
 def autopad(k, p=None):  # kernel, padding
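For context, autopad — whose signature is the only line of the module body shown here — computes 'same' padding for a given kernel size. The standard one-liner, sketched under that assumption:

    def autopad(k, p=None):  # kernel, padding
        # default to 'same' padding: half the kernel, per dimension when k is a tuple
        if p is None:
            p = k // 2 if isinstance(k, int) else [x // 2 for x in k]
        return p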
models/experimental.py
CHANGED

@@ -1,7 +1,11 @@
 # This file contains experimental modules
 
-from models.common import *
-from utils import google_utils
+import numpy as np
+import torch
+import torch.nn as nn
+
+from models.common import Conv, DWConv
+from utils.google_utils import attempt_download
 
 
 class CrossConv(nn.Module):
@@ -129,7 +133,7 @@ def attempt_load(weights, map_location=None):
     # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
     model = Ensemble()
     for w in weights if isinstance(weights, list) else [weights]:
-        google_utils.attempt_download(w)
+        attempt_download(w)
         model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval())  # load FP32 model
 
     if len(model) == 1:
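attempt_load, now the only name detect.py and test.py pull from this module, hides the single-vs-ensemble distinction behind one call, per the comment in the hunk above. Usage, assuming the checkpoints exist locally:

    from models.experimental import attempt_load

    model = attempt_load('yolov5s.pt', map_location='cpu')  # single FP32 model, fused, in eval mode
    ensemble = attempt_load(['yolov5s.pt', 'yolov5m.pt'], map_location='cpu')  # Ensemble of two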
models/export.py
CHANGED

@@ -6,8 +6,9 @@ Usage:
 
 import argparse
 
-from models.common import *
-from utils import google_utils
+import torch
+
+from utils.google_utils import attempt_download
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
@@ -22,7 +23,7 @@ if __name__ == '__main__':
     img = torch.zeros((opt.batch_size, 3, *opt.img_size))  # image size(1,3,320,192) iDetection
 
     # Load PyTorch model
-    google_utils.attempt_download(opt.weights)
+    attempt_download(opt.weights)
     model = torch.load(opt.weights, map_location=torch.device('cpu'))['model'].float()
     model.eval()
     model.model[-1].export = True  # set Detect() layer export=True
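After attempt_download and torch.load, export.py traces the model to TorchScript. A self-contained sketch of that step (checkpoint path and image size are hypothetical; the real script also sets model.model[-1].export = True first):

    import torch

    weights = 'yolov5s.pt'  # hypothetical checkpoint path
    img = torch.zeros((1, 3, 640, 640))  # dummy input, matching --img 640 --batch 1

    model = torch.load(weights, map_location=torch.device('cpu'))['model'].float().eval()
    ts = torch.jit.trace(model, img)  # record the forward pass with fixed shapes
    ts.save(weights.replace('.pt', '.torchscript.pt'))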
models/yolo.py
CHANGED

@@ -1,7 +1,16 @@
 import argparse
+import math
 from copy import deepcopy
+from pathlib import Path
 
-from models.experimental import *
+import torch
+import torch.nn as nn
+
+from models.common import Conv, Bottleneck, SPP, DWConv, Focus, BottleneckCSP, Concat
+from models.experimental import MixConv2d, CrossConv, C3
+from utils.general import check_anchor_order, make_divisible, check_file
+from utils.torch_utils import (
+    time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, select_device)
 
 
 class Detect(nn.Module):
@@ -75,7 +84,7 @@ class Model(nn.Module):
         # print('Strides: %s' % m.stride.tolist())
 
         # Init weights, biases
-        torch_utils.initialize_weights(self)
+        initialize_weights(self)
         self.info()
         print('')
 
@@ -86,7 +95,7 @@ class Model(nn.Module):
             f = [None, 3, None]  # flips (2-ud, 3-lr)
             y = []  # outputs
             for si, fi in zip(s, f):
-                xi = torch_utils.scale_img(x.flip(fi) if fi else x, si)
+                xi = scale_img(x.flip(fi) if fi else x, si)
                 yi = self.forward_once(xi)[0]  # forward
                 # cv2.imwrite('img%g.jpg' % s, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1])  # save
                 yi[..., :4] /= si  # de-scale
@@ -111,10 +120,10 @@ class Model(nn.Module):
                    o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # FLOPS
                except:
                    o = 0
-                t = torch_utils.time_synchronized()
+                t = time_synchronized()
                for _ in range(10):
                    _ = m(x)
-                dt.append((torch_utils.time_synchronized() - t) * 100)
+                dt.append((time_synchronized() - t) * 100)
                print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))
 
            x = m(x)  # run
@@ -149,14 +158,14 @@ class Model(nn.Module):
         for m in self.model.modules():
             if type(m) is Conv:
                 m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatability
-                m.conv = torch_utils.fuse_conv_and_bn(m.conv, m.bn)  # update conv
+                m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
                 m.bn = None  # remove batchnorm
                 m.forward = m.fuseforward  # update forward
         self.info()
         return self
 
     def info(self):  # print model information
-        torch_utils.model_info(self)
+        model_info(self)
 
 
 def parse_model(d, ch):  # model_dict, input_channels(3)
@@ -228,7 +237,7 @@ if __name__ == '__main__':
     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
     opt = parser.parse_args()
     opt.cfg = check_file(opt.cfg)  # check file
-    device = torch_utils.select_device(opt.device)
+    device = select_device(opt.device)
 
     # Create model
     model = Model(opt.cfg).to(device)
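fuse() above folds each Conv's BatchNorm into the convolution via fuse_conv_and_bn, so fused inference runs one op where training ran two. A sketch of the standard folding — w' = diag(γ/√(σ² + ε))·w and b' = β + diag(γ/√(σ² + ε))·(b − μ) — assuming that is what the helper implements:

    import torch
    import torch.nn as nn


    def fuse_conv_and_bn(conv, bn):
        # fold a BatchNorm2d into the preceding Conv2d (valid for inference only)
        fused = nn.Conv2d(conv.in_channels, conv.out_channels, kernel_size=conv.kernel_size,
                          stride=conv.stride, padding=conv.padding, groups=conv.groups,
                          bias=True).requires_grad_(False).to(conv.weight.device)

        # scale the weights per output channel by gamma / sqrt(var + eps)
        w_conv = conv.weight.clone().view(conv.out_channels, -1)
        w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
        fused.weight.copy_(torch.mm(w_bn, w_conv).view(fused.weight.shape))

        # shift the bias by beta - gamma * mean / sqrt(var + eps)
        b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
        b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
        fused.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
        return fused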
test.py
CHANGED

@@ -1,8 +1,21 @@
 import argparse
+import glob
 import json
+import os
+import shutil
+from pathlib import Path
 
-from models.experimental import *
-from utils.datasets import *
+import numpy as np
+import torch
+import yaml
+from tqdm import tqdm
+
+from models.experimental import attempt_load
+from utils.datasets import create_dataloader
+from utils.general import (
+    coco80_to_coco91_class, check_file, check_img_size, compute_loss, non_max_suppression,
+    scale_coords, xyxy2xywh, clip_coords, plot_images, xywh2xyxy, box_iou, output_to_target, ap_per_class)
+from utils.torch_utils import select_device, time_synchronized
 
 
 def test(data,
@@ -26,7 +39,7 @@ def test(data,
         device = next(model.parameters()).device  # get model device
 
     else:  # called directly
-        device = torch_utils.select_device(opt.device, batch_size=batch_size)
+        device = select_device(opt.device, batch_size=batch_size)
         merge, save_txt = opt.merge, opt.save_txt  # use Merge NMS, save *.txt labels
         if save_txt:
             out = Path('inference/output')
@@ -85,18 +98,18 @@ def test(data,
         # Disable gradients
         with torch.no_grad():
             # Run model
-            t = torch_utils.time_synchronized()
+            t = time_synchronized()
             inf_out, train_out = model(img, augment=augment)  # inference and training outputs
-            t0 += torch_utils.time_synchronized() - t
+            t0 += time_synchronized() - t
 
             # Compute loss
             if training:  # if model has loss hyperparameters
                 loss += compute_loss([x.float() for x in train_out], targets, model)[1][:3]  # GIoU, obj, cls
 
             # Run NMS
-            t = torch_utils.time_synchronized()
+            t = time_synchronized()
             output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, merge=merge)
-            t1 += torch_utils.time_synchronized() - t
+            t1 += time_synchronized() - t
 
             # Statistics per image
             for si, pred in enumerate(output):
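Among the names test.py now imports explicitly from utils.general, box_iou supplies the NxM IoU matrix used when matching predictions to labels in the statistics loop. The standard implementation, sketched here for xyxy tensors:

    import torch


    def box_iou(box1, box2):
        # box1: Nx4, box2: Mx4, both [x1, y1, x2, y2]; returns NxM pairwise IoU
        area1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1])
        area2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1])
        lt = torch.max(box1[:, None, :2], box2[:, :2])  # NxMx2 upper-left of intersection
        rb = torch.min(box1[:, None, 2:], box2[:, 2:])  # NxMx2 lower-right of intersection
        inter = (rb - lt).clamp(min=0).prod(2)  # NxM intersection areas
        return inter / (area1[:, None] + area2 - inter)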
train.py
CHANGED

@@ -1,19 +1,32 @@
 import argparse
-
+import glob
+import math
+import os
+import time
+from pathlib import Path
+from random import random
+
+import numpy as np
 import torch.distributed as dist
 import torch.nn.functional as F
 import torch.optim as optim
 import torch.optim.lr_scheduler as lr_scheduler
 import torch.utils.data
+import yaml
 from torch.cuda import amp
 from torch.nn.parallel import DistributedDataParallel as DDP
 from torch.utils.tensorboard import SummaryWriter
+from tqdm import tqdm
 
 import test  # import test.py to get mAP after each epoch
 from models.yolo import Model
-from utils import google_utils
-from utils.datasets import *
-from utils.utils import *
+from utils.datasets import create_dataloader
+from utils.general import (
+    check_img_size, torch_distributed_zero_first, labels_to_class_weights, plot_labels, check_anchors,
+    labels_to_image_weights, compute_loss, plot_images, fitness, strip_optimizer, plot_results,
+    get_latest_run, check_git_status, check_file, increment_dir, print_mutation)
+from utils.google_utils import attempt_download
+from utils.torch_utils import init_seeds, ModelEMA, select_device
 
 # Hyperparameters
 hyp = {'lr0': 0.01,  # initial learning rate (SGD=1E-2, Adam=1E-3)
@@ -119,7 +132,7 @@ def train(hyp, opt, device, tb_writer=None):
 
     # Load Model
     with torch_distributed_zero_first(rank):
-        google_utils.attempt_download(weights)
+        attempt_download(weights)
     start_epoch, best_fitness = 0, 0.0
     if weights.endswith('.pt'):  # pytorch format
         ckpt = torch.load(weights, map_location=device)  # load checkpoint
@@ -167,7 +180,7 @@ def train(hyp, opt, device, tb_writer=None):
         print('Using SyncBatchNorm()')
 
     # Exponential moving average
-    ema = torch_utils.ModelEMA(model) if rank in [-1, 0] else None
+    ema = ModelEMA(model) if rank in [-1, 0] else None
 
     # DDP mode
     if cuda and rank != -1:
@@ -438,7 +451,7 @@ if __name__ == '__main__':
    with open(opt.hyp) as f:
        hyp.update(yaml.load(f, Loader=yaml.FullLoader))  # update hyps
    opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, test)
-    device = torch_utils.select_device(opt.device, batch_size=opt.batch_size)
+    device = select_device(opt.device, batch_size=opt.batch_size)
    opt.total_batch_size = opt.batch_size
    opt.world_size = 1
 
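ModelEMA, now imported by name from utils.torch_utils, keeps a decayed shadow copy of the weights for evaluation; note it is built only on ranks -1/0, so DDP workers do not each maintain one. A condensed sketch of the usual implementation (the 2000-step decay ramp is an assumption):

    import math
    from copy import deepcopy


    class ModelEMA:
        # exponential moving average of model weights, updated after each optimizer step

        def __init__(self, model, decay=0.9999):
            self.ema = deepcopy(model).eval()  # shadow copy used for val/test
            self.updates = 0
            self.decay = lambda x: decay * (1 - math.exp(-x / 2000))  # ramp up early in training
            for p in self.ema.parameters():
                p.requires_grad_(False)

        def update(self, model):
            self.updates += 1
            d = self.decay(self.updates)
            msd = model.state_dict()
            for k, v in self.ema.state_dict().items():
                if v.dtype.is_floating_point:
                    v *= d
                    v += (1. - d) * msd[k].detach()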
utils/datasets.py
CHANGED

@@ -14,7 +14,7 @@ from PIL import Image, ExifTags
 from torch.utils.data import Dataset
 from tqdm import tqdm
 
-from utils.utils import xyxy2xywh, xywh2xyxy, torch_distributed_zero_first
+from utils.general import xyxy2xywh, xywh2xyxy, torch_distributed_zero_first
 
 help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
 img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']
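The box-format converters simply move with the utils.py → general.py rename. For reference, xyxy2xywh is the usual corner-to-center transform, sketched here for torch tensors (the repo version presumably also accepts numpy arrays):

    import torch


    def xyxy2xywh(x):
        # nx4 [x1, y1, x2, y2] -> [x_center, y_center, width, height]
        y = x.clone()
        y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
        y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
        y[:, 2] = x[:, 2] - x[:, 0]  # width
        y[:, 3] = x[:, 3] - x[:, 1]  # height
        return y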
utils/{utils.py → general.py}
RENAMED

@@ -18,10 +18,11 @@ import torch
 import torch.nn as nn
 import torchvision
 import yaml
+from scipy.cluster.vq import kmeans
 from scipy.signal import butter, filtfilt
 from tqdm import tqdm
 
-from . import torch_utils
+from utils.torch_utils import init_seeds, is_parallel
 
 # Set printoptions
 torch.set_printoptions(linewidth=320, precision=5, profile='long')
@@ -47,7 +48,7 @@ def torch_distributed_zero_first(local_rank: int):
 def init_seeds(seed=0):
     random.seed(seed)
     np.random.seed(seed)
-    torch_utils.init_seeds(seed=seed)
+    init_seeds(seed=seed)
 
 
 def get_latest_run(search_dir='./runs'):
@@ -505,7 +506,7 @@ def compute_loss(p, targets, model):  # predictions, targets, model
 
 def build_targets(p, targets, model):
     # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
-    det = model.module.model[-1] if torch_utils.is_parallel(model) else model.model[-1]  # Detect() module
+    det = model.module.model[-1] if is_parallel(model) else model.model[-1]  # Detect() module
     na, nt = det.na, targets.shape[0]  # number of anchors, targets
     tcls, tbox, indices, anch = [], [], [], []
     gain = torch.ones(7, device=targets.device)  # normalized to gridspace gain
@@ -779,7 +780,6 @@ def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=10
     wh = wh0[(wh0 >= 2.0).any(1)]  # filter > 2 pixels
 
     # Kmeans calculation
-    from scipy.cluster.vq import kmeans
     print('Running kmeans for %g anchors on %g points...' % (n, len(wh)))
     s = wh.std(0)  # sigmas for whitening
     k, dist = kmeans(wh / s, n, iter=30)  # points, mean distance