glenn-jocher committed on
Commit 4e2d246
1 Parent(s): 11ba529

update yolo.py

Files changed (1)
  1. models/yolo.py +12 -8
models/yolo.py CHANGED
@@ -59,10 +59,14 @@ class Model(nn.Module):
 
         # Build strides, anchors
         m = self.model[-1]  # Detect()
-        m.stride = torch.tensor([128 / x.shape[-2] for x in self.forward(torch.zeros(1, ch, 128, 128))])  # forward
-        m.anchors /= m.stride.view(-1, 1, 1)
-        check_anchor_order(m)
-        self.stride = m.stride
+        if isinstance(m, Detect):
+            s = 128  # 2x min stride
+            m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])  # forward
+            m.anchors /= m.stride.view(-1, 1, 1)
+            check_anchor_order(m)
+            self.stride = m.stride
+            self._initialize_biases()  # only run once
+            # print('Strides: %s' % m.stride.tolist())
 
         # Init weights, biases
         torch_utils.initialize_weights(self)
@@ -146,7 +150,7 @@ class Model(nn.Module):
 
 
 def parse_model(md, ch):  # model_dict, input_channels(3)
-    print('\n%3s%15s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
+    print('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
     anchors, nc, gd, gw = md['anchors'], md['nc'], md['depth_multiple'], md['width_multiple']
     na = (len(anchors[0]) // 2)  # number of anchors
     no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)
@@ -161,7 +165,7 @@ def parse_model(md, ch):  # model_dict, input_channels(3)
                 pass
 
         n = max(round(n * gd), 1) if n > 1 else n  # depth gain
-        if m in [nn.Conv2d, Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus, BottleneckCSP, CrossConv]:
+        if m in [nn.Conv2d, Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3]:
            c1, c2 = ch[f], args[0]
 
            # Normal
@@ -182,7 +186,7 @@ def parse_model(md, ch):  # model_dict, input_channels(3)
            # c2 = make_divisible(c2, 8) if c2 != no else c2
 
            args = [c1, c2, *args[1:]]
-            if m is BottleneckCSP:
+            if m in [BottleneckCSP, C3]:
                args.insert(2, n)
                n = 1
        elif m is nn.BatchNorm2d:
@@ -198,7 +202,7 @@ def parse_model(md, ch):  # model_dict, input_channels(3)
        t = str(m)[8:-2].replace('__main__.', '')  # module type
        np = sum([x.numel() for x in m_.parameters()])  # number params
        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
-        print('%3s%15s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args))  # print
+        print('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args))  # print
        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
        layers.append(m_)
        ch.append(c2)
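
The guarded block in the first hunk derives one stride per detection layer from a dummy forward pass at s = 128, then rescales the anchors from pixels to grid units. A minimal, self-contained sketch of that arithmetic (the ToyMultiScale module and the anchor values below are illustrative assumptions, not the repository's classes):

# Sketch only: a toy three-scale module standing in for the real backbone + Detect() wiring.
import torch
import torch.nn as nn
import torch.nn.functional as F

class ToyMultiScale(nn.Module):
    # Emits feature maps at 1/8, 1/16 and 1/32 of the input resolution,
    # mimicking the shapes the dummy forward pass sees at the Detect() head.
    def forward(self, x):
        return [F.avg_pool2d(x, k) for k in (8, 16, 32)]

s = 128  # 2x min stride, same constant as in the diff
outs = ToyMultiScale()(torch.zeros(1, 3, s, s))
stride = torch.tensor([s / o.shape[-2] for o in outs])  # -> tensor([ 8., 16., 32.])

# Anchors are listed in pixels per detection layer; dividing by the per-layer
# stride re-expresses them in grid units (example values, YOLOv5s-style).
anchors_px = torch.tensor([[[10., 13.], [16., 30.], [33., 23.]],        # P3/8
                           [[30., 61.], [62., 45.], [59., 119.]],       # P4/16
                           [[116., 90.], [156., 198.], [373., 326.]]])  # P5/32
anchors_grid = anchors_px / stride.view(-1, 1, 1)
print(stride, anchors_grid[0])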
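
self._initialize_biases() is now called at this point because the Detect bias initialization depends on the per-layer strides computed just above. A hedged sketch of that kind of prior-based bias initialization (illustrative form only, with stand-in output convolutions; the actual method body lives elsewhere in yolo.py and may differ):

# Sketch of stride-dependent detection-bias initialization (assumed form, not the exact method).
import math
import torch
import torch.nn as nn

nc, na = 80, 3                       # classes, anchors per layer
strides = [8., 16., 32.]
convs = [nn.Conv2d(128, na * (nc + 5), 1) for _ in strides]  # stand-ins for Detect's output convs

for conv, s in zip(convs, strides):
    b = conv.bias.view(na, -1)       # (anchors, 5 + nc)
    # Objectness bias: assume ~8 objects in a 640x640 image, spread over (640/s)^2 grid cells.
    b.data[:, 4] += math.log(8 / (640 / s) ** 2)
    # Class bias: assume ~0.6 positives spread roughly uniformly over nc classes.
    b.data[:, 5:] += math.log(0.6 / (nc - 0.99))
    conv.bias = nn.Parameter(b.view(-1), requires_grad=True)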
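
In parse_model, both membership checks are extended to cover the new C3 module. CSP-style blocks receive the repeat count n as a constructor argument via args.insert(2, n) instead of being stacked n times by parse_model; a small sketch of that dispatch with a stand-in block (ToyCSP and build are assumed names, not the repository's modules):

# Sketch only: ToyCSP stands in for BottleneckCSP / C3.
import torch.nn as nn

class ToyCSP(nn.Module):
    # Builds its own internal repeats from the n argument, like the CSP blocks.
    def __init__(self, c1, c2, n=1):
        super().__init__()
        self.m = nn.Sequential(*[nn.Conv2d(c1 if i == 0 else c2, c2, 1) for i in range(n)])
    def forward(self, x):
        return self.m(x)

def build(m, n, args):
    # Mirrors the diff: CSP-style modules receive n as a constructor argument
    # (inserted after c1, c2) and the outer repeat count collapses to 1.
    if m in [ToyCSP]:
        args = [*args[:2], n, *args[2:]]
        n = 1
    return m(*args) if n == 1 else nn.Sequential(*[m(*args) for _ in range(n)])

print(build(ToyCSP, n=3, args=[64, 128]))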