goodfellowliu committed
Commit 0c4b4b8
Parents: 09c1b96 db2c3ac

Merge remote-tracking branch 'upstream/master'

Files changed (7)
  1. detect.py +1 -1
  2. hubconf.py +2 -2
  3. test.py +10 -8
  4. train.py +5 -5
  5. utils/activations.py +1 -0
  6. utils/google_utils.py +8 -3
  7. utils/utils.py +11 -1
detect.py CHANGED
@@ -18,7 +18,7 @@ def detect(save_img=False):
 
     # Load model
     google_utils.attempt_download(weights)
-    model = torch.load(weights, map_location=device)['model']
+    model = torch.load(weights, map_location=device)['model'].float()  # load to FP32
     # torch.save(torch.load(weights, map_location=device), weights)  # update model if SourceChangeWarning
     # model.fuse()
     model.to(device).eval()
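The only change here casts the checkpointed model to FP32 immediately after loading, since checkpoints saved from mixed-precision training may hold FP16 weights. A minimal sketch of the resulting load pattern; the weights path and device below are placeholders:

import torch

weights = 'yolov5s.pt'  # placeholder checkpoint path
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Checkpoints may store FP16 weights; cast to FP32 before inference setup
model = torch.load(weights, map_location=device)['model'].float()
model.to(device).eval()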
hubconf.py CHANGED
@@ -32,8 +32,8 @@ def create(name, pretrained, channels, classes):
     if pretrained:
         ckpt = '%s.pt' % name  # checkpoint filename
         google_utils.attempt_download(ckpt)  # download if not found locally
-        state_dict = torch.load(ckpt, map_location=torch.device('cpu'))['model'].state_dict()
-        state_dict = {k: v for k, v in state_dict.items() if model.state_dict()[k].numel() == v.numel()}  # filter
+        state_dict = torch.load(ckpt, map_location=torch.device('cpu'))['model'].float().state_dict()  # to FP32
+        state_dict = {k: v for k, v in state_dict.items() if model.state_dict()[k].shape == v.shape}  # filter
         model.load_state_dict(state_dict, strict=False)  # load
     return model
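Two refinements here: the pretrained state_dict is cast to FP32 before filtering, and the filter now compares shapes rather than element counts, since two tensors can have equal numel() but different shapes. A self-contained illustration of why the shape check is stricter:

import torch

a = torch.zeros(2, 3)
b = torch.zeros(3, 2)
print(a.numel() == b.numel())  # True: the old numel() filter would accept this mismatched pair
print(a.shape == b.shape)      # False: the new shape filter rejects it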
test.py CHANGED
@@ -23,6 +23,7 @@ def test(data,
          verbose=False):
     # Initialize/load model and set device
     if model is None:
+        training = False
         device = torch_utils.select_device(opt.device, batch_size=batch_size)
         half = device.type != 'cpu'  # half precision only supported on CUDA
 
@@ -32,9 +33,9 @@ def test(data,
 
         # Load model
         google_utils.attempt_download(weights)
-        model = torch.load(weights, map_location=device)['model']
+        model = torch.load(weights, map_location=device)['model'].float()  # load to FP32
         torch_utils.model_info(model)
-        # model.fuse()
+        model.fuse()
         model.to(device)
         if half:
             model.half()  # to FP16
@@ -42,11 +43,12 @@ def test(data,
         if device.type != 'cpu' and torch.cuda.device_count() > 1:
             model = nn.DataParallel(model)
 
-        training = False
     else:  # called by train.py
-        device = next(model.parameters()).device  # get model device
-        half = False
         training = True
+        device = next(model.parameters()).device  # get model device
+        half = device.type != 'cpu'  # half precision only supported on CUDA
+        if half:
+            model.half()  # to FP16
 
     # Configure
     model.eval()
@@ -69,7 +71,7 @@ def test(data,
                                   batch_size,
                                   rect=True,  # rectangular inference
                                   single_cls=opt.single_cls,  # single class mode
-                                  pad=0.0 if fast else 0.5)  # padding
+                                  pad=0.5)  # padding
     batch_size = min(batch_size, len(dataset))
     nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
     dataloader = DataLoader(dataset,
@@ -102,7 +104,7 @@ def test(data,
 
         # Compute loss
         if training:  # if model has loss hyperparameters
-            loss += compute_loss(train_out, targets, model)[1][:3]  # GIoU, obj, cls
+            loss += compute_loss([x.float() for x in train_out], targets, model)[1][:3]  # GIoU, obj, cls
 
         # Run NMS
         t = torch_utils.time_synchronized()
@@ -255,7 +257,7 @@ if __name__ == '__main__':
     opt = parser.parse_args()
     opt.img_size = check_img_size(opt.img_size)
     opt.save_json = opt.save_json or opt.data.endswith('coco.yaml')
-    opt.data = glob.glob('./**/' + opt.data, recursive=True)[0]  # find file
+    opt.data = check_file(opt.data)  # check file
     print(opt)
 
     # task = 'val', 'test', 'study'
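test.py now sets the training flag up front, fuses Conv+BatchNorm layers before standalone evaluation, and, when called from train.py, reuses the training device and casts the model to FP16 on CUDA; the raw outputs are cast back to FP32 before compute_loss so the loss accumulates in full precision. A rough sketch of that FP16-forward / FP32-loss pattern, with a random tensor standing in for the network outputs:

import torch

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
half = device.type != 'cpu'  # half precision only supported on CUDA

train_out = [torch.randn(1, 3, 8, 8, 85, device=device)]  # stand-in for model outputs
if half:
    train_out = [x.half() for x in train_out]

# Cast back to FP32 before accumulating the loss, mirroring the change above
loss_inputs = [x.float() for x in train_out]
print([x.dtype for x in loss_inputs])  # [torch.float32]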
train.py CHANGED
@@ -112,8 +112,8 @@ def train(hyp):
 
         # load model
         try:
-            ckpt['model'] = \
-                {k: v for k, v in ckpt['model'].state_dict().items() if model.state_dict()[k].numel() == v.numel()}
+            ckpt['model'] = {k: v for k, v in ckpt['model'].float().state_dict().items()
+                             if model.state_dict()[k].shape == v.shape}  # to FP32, filter
             model.load_state_dict(ckpt['model'], strict=False)
         except KeyError as e:
             s = "%s is not compatible with %s. Specify --weights '' or specify a --cfg compatible with %s." \
@@ -363,6 +363,7 @@ def train(hyp):
 
 
 if __name__ == '__main__':
+    check_git_status()
     parser = argparse.ArgumentParser()
     parser.add_argument('--epochs', type=int, default=300)
     parser.add_argument('--batch-size', type=int, default=16)
@@ -384,12 +385,11 @@ if __name__ == '__main__':
     parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
     opt = parser.parse_args()
     opt.weights = last if opt.resume else opt.weights
-    opt.cfg = glob.glob('./**/' + opt.cfg, recursive=True)[0]  # find file
-    opt.data = glob.glob('./**/' + opt.data, recursive=True)[0]  # find file
+    opt.cfg = check_file(opt.cfg)  # check file
+    opt.data = check_file(opt.data)  # check file
     print(opt)
     opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, test)
     device = torch_utils.select_device(opt.device, apex=mixed_precision, batch_size=opt.batch_size)
-    # check_git_status()
     if device.type == 'cpu':
         mixed_precision = False
 
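The checkpoint filter mirrors the hubconf.py change: cast the saved model to FP32 first, then keep only tensors whose shapes match the freshly built model, so resuming from an FP16 checkpoint or a slightly different architecture degrades gracefully. check_git_status() also moves from a commented-out call near the bottom to an unconditional call at the top of __main__, and the bare glob lookups for --cfg and --data are replaced by check_file() (defined in utils/utils.py below). A small self-contained sketch of the filtering step, with a toy module standing in for the YOLO model:

import torch
import torch.nn as nn

model = nn.Linear(4, 2)  # stand-in for the freshly constructed model

# Simulate a checkpoint whose weights were saved in FP16 (e.g. from mixed-precision training)
ckpt = {'model': nn.Linear(4, 2).half()}

# Cast to FP32, then keep only tensors whose shape matches the new model
ckpt['model'] = {k: v for k, v in ckpt['model'].float().state_dict().items()
                 if model.state_dict()[k].shape == v.shape}
model.load_state_dict(ckpt['model'], strict=False)
print(next(model.parameters()).dtype)  # torch.float32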
utils/activations.py CHANGED
@@ -1,4 +1,5 @@
 import torch
+import torch.nn as nn
 import torch.nn.functional as F
 import torch.nn as nn
 
utils/google_utils.py CHANGED
@@ -25,10 +25,15 @@ def attempt_download(weights):
        if file in d:
            r = gdrive_download(id=d[file], name=weights)
 
-       # Error check
        if not (r == 0 and os.path.exists(weights) and os.path.getsize(weights) > 1E6):  # weights exist and > 1MB
-           os.system('rm ' + weights)  # remove partial downloads
-           raise Exception(msg)
+           os.remove(weights) if os.path.exists(weights) else None  # remove partial downloads
+           s = "curl -L -o %s 'https://storage.googleapis.com/ultralytics/yolov5/ckpt/%s'" % (weights, file)
+           r = os.system(s)  # execute, capture return values
+
+           # Error check
+           if not (r == 0 and os.path.exists(weights) and os.path.getsize(weights) > 1E6):  # weights exist and > 1MB
+               os.remove(weights) if os.path.exists(weights) else None  # remove partial downloads
+               raise Exception(msg)
 
 
 def gdrive_download(id='1HaXkef9z6y5l4vUnCYgdmEAj61c6bfWO', name='coco.zip'):
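attempt_download() gains a second stage: if the Google Drive fetch fails or leaves a file smaller than 1 MB, the partial file is removed and a curl fallback pulls the checkpoint from the ultralytics Google Cloud Storage bucket; only if that also fails is the exception raised. The same two-stage retry shape in isolation, with the download steps abstracted into callables (the names here are illustrative, not part of the repo):

import os

def fetch_with_fallback(weights, primary, fallback):
    # primary and fallback are callables returning a shell-style exit code (0 == success)
    def ok(r):
        return r == 0 and os.path.exists(weights) and os.path.getsize(weights) > 1E6  # exists and > 1MB
    r = primary()
    if not ok(r):
        if os.path.exists(weights):
            os.remove(weights)  # remove partial downloads
        r = fallback()
        if not ok(r):
            if os.path.exists(weights):
                os.remove(weights)
            raise Exception('%s download failed' % weights)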
utils/utils.py CHANGED
@@ -64,6 +64,16 @@ def check_best_possible_recall(dataset, anchors, thr):
                      'Compute new anchors with utils.utils.kmeans_anchors() and update model before training.' % bpr
 
 
+def check_file(file):
+    # Searches for file if not found locally
+    if os.path.isfile(file):
+        return file
+    else:
+        files = glob.glob('./**/' + file, recursive=True)  # find file
+        assert len(files), 'File Not Found: %s' % file  # assert file was found
+        return files[0]  # return first file if multiple found
+
+
 def make_divisible(x, divisor):
     # Returns x evenly divisble by divisor
     return math.ceil(x / divisor) * divisor
@@ -518,7 +528,7 @@ def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, fast=False, c
     fast |= conf_thres > 0.001  # fast mode
     if fast:
        merge = False
-       multi_label = False
+       multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)
     else:
        merge = True  # merge for best mAP (adds 0.5ms/img)
        multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)
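check_file() centralizes path resolution for the --data and --cfg arguments used above: an existing path is returned unchanged, otherwise a recursive glob locates the file anywhere under the working directory, and a missing file now fails with a readable AssertionError rather than the IndexError the bare glob lookup produced. The second hunk makes fast-mode NMS keep multi-label assignment whenever there is more than one class, matching the regular path. A short usage sketch (the file names are placeholders):

from utils.utils import check_file

data = check_file('coco.yaml')   # returned as-is if it exists, else resolved via recursive glob
# check_file('missing.yaml')     # raises AssertionError: File Not Found: missing.yaml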