Commit 7882950 · pre-commit-ci[bot] and glenn-jocher committed
Parent: ea72b84

[pre-commit.ci] pre-commit suggestions (#7279)


* [pre-commit.ci] pre-commit suggestions

updates:
- [github.com/asottile/pyupgrade: v2.31.0 β†’ v2.31.1](https://github.com/asottile/pyupgrade/compare/v2.31.0...v2.31.1)
- [github.com/pre-commit/mirrors-yapf: v0.31.0 β†’ v0.32.0](https://github.com/pre-commit/mirrors-yapf/compare/v0.31.0...v0.32.0)

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Update yolo.py

* Update activations.py

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Update activations.py

* Update tf.py

* Update tf.py

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>

.pre-commit-config.yaml CHANGED
@@ -24,7 +24,7 @@ repos:
       - id: check-docstring-first
 
   - repo: https://github.com/asottile/pyupgrade
-    rev: v2.31.0
+    rev: v2.31.1
     hooks:
       - id: pyupgrade
         args: [--py36-plus]
@@ -37,7 +37,7 @@ repos:
         name: Sort imports
 
   - repo: https://github.com/pre-commit/mirrors-yapf
-    rev: v0.31.0
+    rev: v0.32.0
     hooks:
       - id: yapf
         name: YAPF formatting
models/tf.py CHANGED
@@ -50,6 +50,7 @@ class TFBN(keras.layers.Layer):
 
 
 class TFPad(keras.layers.Layer):
+
     def __init__(self, pad):
         super().__init__()
         self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])
@@ -206,6 +207,7 @@ class TFSPPF(keras.layers.Layer):
 
 
 class TFDetect(keras.layers.Layer):
+    # TF YOLOv5 Detect layer
     def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None):  # detection layer
         super().__init__()
         self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32)
@@ -255,6 +257,7 @@ class TFDetect(keras.layers.Layer):
 
 
 class TFUpsample(keras.layers.Layer):
+    # TF version of torch.nn.Upsample()
     def __init__(self, size, scale_factor, mode, w=None):  # warning: all arguments needed including 'w'
         super().__init__()
         assert scale_factor == 2, "scale_factor must be 2"
@@ -269,6 +272,7 @@ class TFUpsample(keras.layers.Layer):
 
 
 class TFConcat(keras.layers.Layer):
+    # TF version of torch.concat()
     def __init__(self, dimension=1, w=None):
         super().__init__()
         assert dimension == 1, "convert only NCHW to NHWC concat"
@@ -331,6 +335,7 @@ def parse_model(d, ch, model, imgsz):  # model_dict, input_channels(3)
 
 
 class TFModel:
+    # TF YOLOv5 model
     def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)):  # model, channels, classes
         super().__init__()
         if isinstance(cfg, dict):
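For context on the TFPad constant visible in the first hunk, here is a minimal sketch (assuming TensorFlow is installed; shapes are illustrative) of how that padding spec acts on an NHWC tensor:

```python
import tensorflow as tf

pad = 1
# One [before, after] pair per axis: batch, height, width, channels
paddings = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])
x = tf.zeros((1, 4, 4, 3))  # NHWC input
y = tf.pad(x, paddings)
print(y.shape)  # (1, 6, 6, 3): only the spatial dims grow
```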
models/yolo.py CHANGED
@@ -88,6 +88,7 @@ class Detect(nn.Module):
 
 
 class Model(nn.Module):
+    # YOLOv5 model
     def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None):  # model, input channels, number of classes
         super().__init__()
         if isinstance(cfg, dict):
utils/activations.py CHANGED
@@ -8,29 +8,32 @@ import torch.nn as nn
 import torch.nn.functional as F
 
 
-# SiLU https://arxiv.org/pdf/1606.08415.pdf ----------------------------------------------------------------------------
-class SiLU(nn.Module):  # export-friendly version of nn.SiLU()
+class SiLU(nn.Module):
+    # SiLU activation https://arxiv.org/pdf/1606.08415.pdf
     @staticmethod
     def forward(x):
         return x * torch.sigmoid(x)
 
 
-class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish()
+class Hardswish(nn.Module):
+    # Hard-SiLU activation
     @staticmethod
     def forward(x):
         # return x * F.hardsigmoid(x)  # for TorchScript and CoreML
         return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0  # for TorchScript, CoreML and ONNX
 
 
-# Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
 class Mish(nn.Module):
+    # Mish activation https://github.com/digantamisra98/Mish
     @staticmethod
     def forward(x):
         return x * F.softplus(x).tanh()
 
 
 class MemoryEfficientMish(nn.Module):
+    # Mish activation memory-efficient
     class F(torch.autograd.Function):
+
         @staticmethod
         def forward(ctx, x):
             ctx.save_for_backward(x)
@@ -47,8 +50,8 @@ class MemoryEfficientMish(nn.Module):
         return self.F.apply(x)
 
 
-# FReLU https://arxiv.org/abs/2007.11824 -------------------------------------------------------------------------------
 class FReLU(nn.Module):
+    # FReLU activation https://arxiv.org/abs/2007.11824
     def __init__(self, c1, k=3):  # ch_in, kernel
         super().__init__()
         self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
@@ -58,12 +61,12 @@ class FReLU(nn.Module):
         return torch.max(x, self.bn(self.conv(x)))
 
 
-# ACON https://arxiv.org/pdf/2009.04759.pdf ----------------------------------------------------------------------------
 class AconC(nn.Module):
-    r""" ACON activation (activate or not).
+    r""" ACON activation (activate or not)
     AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
     according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
     """
+
     def __init__(self, c1):
         super().__init__()
         self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
@@ -76,10 +79,11 @@ class AconC(nn.Module):
 
 
 class MetaAconC(nn.Module):
-    r""" ACON activation (activate or not).
+    r""" ACON activation (activate or not)
     MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
     according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
     """
+
     def __init__(self, c1, k=1, s=1, r=16):  # ch_in, kernel, stride, r
         super().__init__()
         c2 = max(r, c1 // r)
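As a sanity check on the export-friendly forms documented in these hunks, a minimal sketch (assuming PyTorch >= 1.9, so the built-in silu, hardswish, and mish ops all exist) comparing them against their torch.nn.functional counterparts:

```python
import torch
import torch.nn.functional as F

x = torch.randn(8)
# Each export-friendly expression should numerically match the built-in op
assert torch.allclose(x * torch.sigmoid(x), F.silu(x), atol=1e-6)                        # SiLU
assert torch.allclose(x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0, F.hardswish(x), atol=1e-6)  # Hardswish
assert torch.allclose(x * F.softplus(x).tanh(), F.mish(x), atol=1e-6)                    # Mish
```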
utils/callbacks.py CHANGED
@@ -8,6 +8,7 @@ class Callbacks:
     """"
     Handles all registered callbacks for YOLOv5 Hooks
     """
+
     def __init__(self):
         # Define the available callbacks
         self._callbacks = {
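For readers unfamiliar with the class being touched here, a minimal sketch of the hook-registry pattern Callbacks implements (hook names and method signatures below are illustrative, not the exact YOLOv5 API):

```python
class Callbacks:
    # Minimal hook registry: store callables per hook name, fire them on demand
    def __init__(self):
        self._callbacks = {'on_train_start': [], 'on_train_end': []}

    def register_action(self, hook, callback):
        self._callbacks[hook].append(callback)

    def run(self, hook, *args, **kwargs):
        for cb in self._callbacks[hook]:
            cb(*args, **kwargs)


cbs = Callbacks()
cbs.register_action('on_train_start', lambda: print('training started'))
cbs.run('on_train_start')
```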
utils/datasets.py CHANGED
@@ -145,6 +145,7 @@ class InfiniteDataLoader(dataloader.DataLoader):
 
     Uses same syntax as vanilla DataLoader
     """
+
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
@@ -164,6 +165,7 @@ class _RepeatSampler:
     Args:
         sampler (Sampler)
     """
+
     def __init__(self, sampler):
         self.sampler = sampler
 
@@ -978,6 +980,7 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profil
         autodownload: Attempt to download dataset if not found locally
         verbose: Print stats dictionary
     """
+
     def round_labels(labels):
         # Update labels to integer class and 6 decimal place floats
         return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels]
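The _RepeatSampler touched above is half of the InfiniteDataLoader pattern; a minimal standalone sketch of the idea (the real class wraps torch Samplers, this version is generic for illustration):

```python
class RepeatSampler:
    # Wrap any iterable sampler so iteration never terminates: when the
    # underlying sampler is exhausted, restart it. Paired with a DataLoader,
    # this keeps worker processes alive across epochs.
    def __init__(self, sampler):
        self.sampler = sampler

    def __iter__(self):
        while True:
            yield from iter(self.sampler)


it = iter(RepeatSampler(range(3)))
print([next(it) for _ in range(7)])  # [0, 1, 2, 0, 1, 2, 0]
```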
utils/loggers/wandb/wandb_utils.py CHANGED
@@ -116,6 +116,7 @@ class WandbLogger():
     For more on how this logger is used, see the Weights & Biases documentation:
     https://docs.wandb.com/guides/integrations/yolov5
     """
+
     def __init__(self, opt, run_id=None, job_type='Training'):
         """
         - Initialize WandbLogger instance
utils/metrics.py CHANGED
@@ -260,6 +260,7 @@ def box_iou(box1, box2):
         iou (Tensor[N, M]): the NxM matrix containing the pairwise
             IoU values for every element in boxes1 and boxes2
     """
+
     def box_area(box):
         # box = 4xn
         return (box[2] - box[0]) * (box[3] - box[1])
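A tiny worked example of the box_area helper shown in this hunk (boxes in xyxy format, transposed to 4xn as the comment notes):

```python
import torch

def box_area(box):
    # box = 4xn, rows are x1, y1, x2, y2
    return (box[2] - box[0]) * (box[3] - box[1])

boxes = torch.tensor([[0., 0., 2., 3.],    # 2 wide x 3 tall -> area 6
                      [1., 1., 4., 2.]])   # 3 wide x 1 tall -> area 3
print(box_area(boxes.T))  # tensor([6., 3.])
```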
utils/torch_utils.py CHANGED
@@ -284,6 +284,7 @@ class ModelEMA:
     Keeps a moving average of everything in the model state_dict (parameters and buffers)
     For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
     """
+
     def __init__(self, model, decay=0.9999, tau=2000, updates=0):
         # Create EMA
         self.ema = deepcopy(de_parallel(model)).eval()  # FP32 EMA
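For context on the ModelEMA parameters visible above (decay=0.9999, tau=2000), a minimal sketch of an exponential-moving-average update with a ramped decay; the ramp shown is an assumption modeled on common EMA warm-up schemes, not necessarily the exact YOLOv5 schedule:

```python
import math

def ema_decay(updates, decay=0.9999, tau=2000):
    # Assumed warm-up: ramp the decay from 0 toward its final value so early
    # updates move the EMA quickly while later updates barely perturb it.
    return decay * (1 - math.exp(-updates / tau))

def ema_update(ema_value, model_value, updates):
    d = ema_decay(updates)
    return d * ema_value + (1 - d) * model_value

print(round(ema_decay(1), 6))      # ~0.0005: early on, the EMA tracks the model
print(round(ema_decay(10000), 6))  # ~0.993163: later, the EMA changes slowly
```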