Image Classification · timm · PDE · ConvNet

liuyao committed
Commit: d80dc9a
Parent: b6656cb

Update qlnet.py

Files changed (1): qlnet.py (+36, −14)
qlnet.py CHANGED
@@ -19,6 +19,8 @@ from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
 from timm.layers import DropBlock2d, DropPath, AvgPool2dSame, BlurPool2d, GroupNorm, create_attn, get_attn, \
     get_act_layer, get_norm_layer, create_classifier, LayerNorm2d
 
+from ._builder import build_model_with_cfg
+from ._registry import register_model, model_entrypoint
 
 def get_padding(kernel_size, stride, dilation=1):
     padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
@@ -104,8 +106,8 @@ class QLBlock(nn.Module): # quasilinear hyperbolic system
     ):
         super(QLBlock, self).__init__()
 
-        k = 4 if inplanes <= 256 else 2
-        width = inplanes * k
+        self.k = 8 if inplanes <= 128 else 4 if inplanes <= 256 else 2
+        width = inplanes * self.k
         outplanes = inplanes if downsample is None else inplanes * 2
         first_dilation = first_dilation or dilation
 
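
The new expansion factor widens the early stages while capping the hidden
width. A quick sketch of the widths it implies, assuming the usual
ResNet-style stage channels (64/128/256/512 is an assumption, not shown in
this hunk):

    for inplanes in (64, 128, 256, 512):
        k = 8 if inplanes <= 128 else 4 if inplanes <= 256 else 2
        print(inplanes, '->', inplanes * k)
    # 64 -> 512, 128 -> 1024, 256 -> 1024, 512 -> 1024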
 
@@ -114,6 +116,8 @@ class QLBlock(nn.Module): # quasilinear hyperbolic system
             dilation=first_dilation, groups=1, bias=False),
             norm_layer(width*2))
 
+        # self.conv2 = nn.Conv2d(1, self.k, kernel_size=3, stride=stride,
+        #     padding=1, dilation=first_dilation, groups=1, bias=False)
         self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride,
             padding=1, dilation=first_dilation, groups=width, bias=False)
         self.bn2 = norm_layer(width)
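
The commented-out Conv2d(1, self.k, ...) hints at the weight sharing that
conv_forward (next hunk) implements with weight.repeat: one bank of self.k
depthwise 3x3 filters tiled across every k-channel group. A minimal sketch of
that equivalence, with illustrative shapes (k=4, C=8 are arbitrary):

    import torch
    import torch.nn.functional as F

    k, C = 4, 8                        # k shared filters, C channel groups
    x = torch.randn(2, C * k, 16, 16)  # input with C*k channels
    w = torch.randn(k, 1, 3, 3)        # one shared bank of depthwise filters

    # Tile the bank C times -> (C*k, 1, 3, 3) and run one depthwise conv,
    # as conv_forward does with groups=x.size(1).
    y = F.conv2d(x, w.repeat(C, 1, 1, 1), padding=1, groups=x.size(1))

    # Reference: apply the same k filters to each k-channel group separately.
    ref = torch.cat([F.conv2d(x[:, i*k:(i+1)*k], w, padding=1, groups=k)
                     for i in range(C)], dim=1)
    assert torch.allclose(y, ref, atol=1e-5)
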
@@ -135,18 +139,17 @@ class QLBlock(nn.Module): # quasilinear hyperbolic system
 
     def conv_forward(self, x):
         conv = self.conv2
-        k = conv.in_channels
-        C = x.size()[1] // k
+        C = x.size(1) // self.k
         kernel = conv.weight.repeat(C, 1, 1, 1)
         bias = conv.bias.repeat(C) if conv.bias is not None else None
         return F.conv2d(x, kernel, bias, conv.stride,
-            conv.padding, conv.dilation, C * k)
+            conv.padding, conv.dilation, x.size(1))
 
     def forward(self, x):
         x0 = self.skip(x)
         x = self.conv1(x)
-        C = x.size(1) // 2
-        x = x[:, :C, :, :] * x[:, C:, :, :]
+        x = x[:, ::2, :, :] * x[:, 1::2, :, :]
+
         x = self.conv2(x)
         x = self.bn2(x)
         x = self.conv3(x)
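
Both the removed and the added lines implement the block's quadratic
nonlinearity: conv1 emits 2*width channels and a pairwise product gates them
down to width; the commit only changes which channels are paired. A sketch
with illustrative shapes (width=4 is arbitrary):

    import torch

    x = torch.randn(2, 8, 4, 4)    # stand-in for conv1 output, 2*width channels
    old = x[:, :4] * x[:, 4:]      # old pairing: channel i with channel i+width
    new = x[:, ::2] * x[:, 1::2]   # new pairing: channel 2i with channel 2i+1
    print(old.shape, new.shape)    # both torch.Size([2, 4, 4, 4])
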
@@ -209,15 +212,15 @@ class QLNet(nn.Module):
     def __init__(
             self,
             block=QLBlock, # new block
-            layers=[3,4,6,3], # as in resnet50
+            layers=[3,4,12,3], # [3,4,6,3] as in resnet50
             num_classes=1000,
             in_chans=3,
             output_stride=32,
             global_pool='avg',
             cardinality=1,
             base_width=64,
-            stem_width=64,
-            stem_type='',
+            stem_width=32,
+            stem_type='', # 'deep' for resnet50d
             replace_stem_pool=False,
             block_reduce_first=1,
             down_kernel_size=1,
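
The new default layers=[3,4,12,3] matches the qlnet22 entrypoint registered
at the bottom of this diff; the variant names appear to count blocks (an
inference from the registry functions, not stated in the commit):

    print(sum([3, 4, 12, 3]))  # 22 -> qlnet22
    print(sum([3, 8, 12, 3]))  # 26 -> qlnet26
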
@@ -280,7 +283,7 @@ class QLNet(nn.Module):
         else:
             self.conv1 = nn.Conv2d(in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)
         self.bn1 = norm_layer(inplanes)
-        self.act1 = act_layer(inplace=True)
+        # self.act1 = act_layer(inplace=True)
         self.feature_info = [dict(num_chs=inplanes, reduction=2, module='act1')]
 
         # Stem pooling. The name 'maxpool' remains for weight compatibility.
@@ -314,7 +317,7 @@ class QLNet(nn.Module):
             self.add_module(*stage) # layer1, layer2, etc
         self.feature_info.extend(stage_feature_info)
 
-        self.act = hardball(radius2=512)
+        # self.act = hardball(radius2=512)
         # self.act = nn.Hardtanh(max_val=5, min_val=-5, inplace=True)
         # self.act = nn.ReLU(inplace=True)
 
@@ -361,7 +364,7 @@ class QLNet(nn.Module):
     def forward_features(self, x):
         x = self.conv1(x)
         x = self.bn1(x)
-        x = self.act1(x)
+        # x = self.act1(x)
         x = self.maxpool(x)
 
         if self.grad_checkpointing and not torch.jit.is_scripting():
@@ -379,7 +382,26 @@ class QLNet(nn.Module):
 
     def forward(self, x):
         x = self.forward_features(x)
-        x = self.act(x)
+        # x = self.act(x)
         x = self.forward_head(x)
         return x
 
+
+def _create_qlnet(variant, pretrained=False, **kwargs):
+    return build_model_with_cfg(QLNet, variant, pretrained, **kwargs)
+
+
+@register_model
+def qlnet22(pretrained=False, **kwargs):
+    """Constructs a QLNet22 model.
+    """
+    model_args = dict(block=QLBlock, layers=[3, 4, 12, 3], **kwargs)
+    return _create_qlnet('qlnet22', pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def qlnet26(pretrained=False, **kwargs):
+    """Constructs a QLNet26 model.
+    """
+    model_args = dict(block=QLBlock, layers=[3, 8, 12, 3], **kwargs)
+    return _create_qlnet('qlnet26', pretrained, **dict(model_args, **kwargs))
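
A hedged usage sketch for the new entrypoints, assuming this qlnet.py lives
inside a local timm fork's models package so the @register_model decorators
run at import time (input size and class count follow the defaults above):

    import torch
    import timm  # the local fork that includes qlnet.py

    model = timm.create_model('qlnet22', pretrained=False)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # torch.Size([1, 1000])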