glenn-jocher committed
Commit ad4c22c
1 Parent(s): b9da3ea

--resume bug fix #187

Files changed (1):
train.py +7 -3
train.py CHANGED
@@ -133,9 +133,13 @@ def train(hyp):
             with open(results_file, 'w') as file:
                 file.write(ckpt['training_results'])  # write results.txt
 
+        # epochs
         start_epoch = ckpt['epoch'] + 1
-        assert opt.epochs > start_epoch, '%s has already trained %g epochs. --epochs must be greater than %g' % \
-                                         (opt.weights, ckpt['epoch'], ckpt['epoch'])
+        if epochs < start_epoch:
+            print('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
+                  (opt.weights, ckpt['epoch'], epochs))
+            epochs += ckpt['epoch']  # finetune additional epochs
+
         del ckpt
 
     # Mixed precision training https://github.com/NVIDIA/apex
@@ -166,7 +170,7 @@ def train(hyp):
 
     # Testloader
     testloader = create_dataloader(test_path, imgsz_test, batch_size, gs, opt,
-                                   hyp=hyp, augment=False, cache=opt.cache_images, rect=True)[0]
+                                   hyp=hyp, augment=False, cache=opt.cache_images, rect=True)[0]
 
     # Model parameters
     hyp['cls'] *= nc / 80.  # scale coco-tuned hyp['cls'] to current dataset
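
For context, a minimal standalone sketch (not part of the commit) of the new --resume behaviour: instead of asserting when the checkpoint has already reached the requested --epochs, the requested count is treated as additional fine-tuning epochs. The resolve_epochs helper name and the example numbers below are illustrative; train.py performs this inline on the checkpoint it loads from --weights.

    def resolve_epochs(ckpt_epoch, requested_epochs, weights='last.pt'):
        """Return (start_epoch, total_epochs) under the new resume logic."""
        start_epoch = ckpt_epoch + 1
        epochs = requested_epochs
        if epochs < start_epoch:
            # Old behaviour: raise AssertionError and abort.
            # New behaviour: extend training by the requested number of epochs
            # on top of the checkpoint's existing progress.
            print('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
                  (weights, ckpt_epoch, epochs))
            epochs += ckpt_epoch  # finetune additional epochs
        return start_epoch, epochs

    # Example: a checkpoint already trained for 300 epochs, resumed with --epochs 100,
    # now continues from epoch 301 toward a total of 400 instead of erroring out.
    print(resolve_epochs(300, 100))  # -> (301, 400)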