meg (HF staff) committed
Commit
99aa18d
1 parent: e411e4d

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. pytorch-image-models/timm/__pycache__/__init__.cpython-39.pyc +0 -0
  2. pytorch-image-models/timm/models/__pycache__/__init__.cpython-39.pyc +0 -0
  3. pytorch-image-models/timm/models/__pycache__/_builder.cpython-39.pyc +0 -0
  4. pytorch-image-models/timm/models/__pycache__/_efficientnet_blocks.cpython-39.pyc +0 -0
  5. pytorch-image-models/timm/models/__pycache__/_helpers.cpython-39.pyc +0 -0
  6. pytorch-image-models/timm/models/__pycache__/byoanet.cpython-39.pyc +0 -0
  7. pytorch-image-models/timm/models/__pycache__/byobnet.cpython-39.pyc +0 -0
  8. pytorch-image-models/timm/models/__pycache__/cait.cpython-39.pyc +0 -0
  9. pytorch-image-models/timm/models/__pycache__/convit.cpython-39.pyc +0 -0
  10. pytorch-image-models/timm/models/__pycache__/convmixer.cpython-39.pyc +0 -0
  11. pytorch-image-models/timm/models/__pycache__/convnext.cpython-39.pyc +0 -0
  12. pytorch-image-models/timm/models/__pycache__/densenet.cpython-39.pyc +0 -0
  13. pytorch-image-models/timm/models/__pycache__/edgenext.cpython-39.pyc +0 -0
  14. pytorch-image-models/timm/models/__pycache__/efficientnet.cpython-39.pyc +0 -0
  15. pytorch-image-models/timm/models/__pycache__/gcvit.cpython-39.pyc +0 -0
  16. pytorch-image-models/timm/models/__pycache__/ghostnet.cpython-39.pyc +0 -0
  17. pytorch-image-models/timm/models/__pycache__/hrnet.cpython-39.pyc +0 -0
  18. pytorch-image-models/timm/models/__pycache__/maxxvit.cpython-39.pyc +0 -0
  19. pytorch-image-models/timm/models/__pycache__/mobilenetv3.cpython-39.pyc +0 -0
  20. pytorch-image-models/timm/models/__pycache__/nfnet.cpython-39.pyc +0 -0
  21. pytorch-image-models/timm/models/__pycache__/pit.cpython-39.pyc +0 -0
  22. pytorch-image-models/timm/models/__pycache__/pvt_v2.cpython-39.pyc +0 -0
  23. pytorch-image-models/timm/models/__pycache__/repvit.cpython-39.pyc +0 -0
  24. pytorch-image-models/timm/models/__pycache__/res2net.cpython-39.pyc +0 -0
  25. pytorch-image-models/timm/models/__pycache__/resnest.cpython-39.pyc +0 -0
  26. pytorch-image-models/timm/models/__pycache__/resnet.cpython-39.pyc +0 -0
  27. pytorch-image-models/timm/models/__pycache__/senet.cpython-39.pyc +0 -0
  28. pytorch-image-models/timm/models/__pycache__/sequencer.cpython-39.pyc +0 -0
  29. pytorch-image-models/timm/models/__pycache__/sknet.cpython-39.pyc +0 -0
  30. pytorch-image-models/timm/models/__pycache__/swin_transformer_v2.cpython-39.pyc +0 -0
  31. pytorch-image-models/timm/models/__pycache__/tiny_vit.cpython-39.pyc +0 -0
  32. pytorch-image-models/timm/models/__pycache__/visformer.cpython-39.pyc +0 -0
  33. pytorch-image-models/timm/models/__pycache__/vision_transformer.cpython-39.pyc +0 -0
  34. pytorch-image-models/timm/models/__pycache__/xcit.cpython-39.pyc +0 -0
  35. pytorch-image-models/timm/models/_pruned/ecaresnet101d_pruned.txt +1 -0
  36. pytorch-image-models/timm/models/_pruned/ecaresnet50d_pruned.txt +1 -0
  37. pytorch-image-models/timm/models/_pruned/efficientnet_b2_pruned.txt +1 -0
  38. pytorch-image-models/timm/models/_pruned/efficientnet_b3_pruned.txt +1 -0
  39. pytorch-image-models/timm/models/resnet.py +0 -0
  40. pytorch-image-models/timm/models/resnetv2.py +911 -0
  41. pytorch-image-models/timm/models/rexnet.py +358 -0
  42. pytorch-image-models/timm/models/sknet.py +240 -0
  43. pytorch-image-models/timm/models/swin_transformer_v2.py +1088 -0
  44. pytorch-image-models/timm/models/swin_transformer_v2_cr.py +1153 -0
  45. pytorch-image-models/timm/models/tiny_vit.py +715 -0
  46. pytorch-image-models/timm/models/tnt.py +374 -0
  47. pytorch-image-models/timm/models/tresnet.py +346 -0
  48. pytorch-image-models/timm/models/twins.py +581 -0
  49. pytorch-image-models/timm/models/vgg.py +298 -0
  50. pytorch-image-models/timm/models/visformer.py +549 -0
pytorch-image-models/timm/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (548 Bytes). View file
 
pytorch-image-models/timm/models/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (3.79 kB). View file
 
pytorch-image-models/timm/models/__pycache__/_builder.cpython-39.pyc ADDED
Binary file (12 kB). View file
 
pytorch-image-models/timm/models/__pycache__/_efficientnet_blocks.cpython-39.pyc ADDED
Binary file (17.6 kB). View file
 
pytorch-image-models/timm/models/__pycache__/_helpers.cpython-39.pyc ADDED
Binary file (4.31 kB). View file
 
pytorch-image-models/timm/models/__pycache__/byoanet.cpython-39.pyc ADDED
Binary file (11.2 kB). View file
 
pytorch-image-models/timm/models/__pycache__/byobnet.cpython-39.pyc ADDED
Binary file (67.5 kB). View file
 
pytorch-image-models/timm/models/__pycache__/cait.cpython-39.pyc ADDED
Binary file (17.4 kB). View file
 
pytorch-image-models/timm/models/__pycache__/convit.cpython-39.pyc ADDED
Binary file (12.8 kB). View file
 
pytorch-image-models/timm/models/__pycache__/convmixer.cpython-39.pyc ADDED
Binary file (5.17 kB). View file
 
pytorch-image-models/timm/models/__pycache__/convnext.cpython-39.pyc ADDED
Binary file (37.6 kB). View file
 
pytorch-image-models/timm/models/__pycache__/densenet.cpython-39.pyc ADDED
Binary file (12.9 kB). View file
 
pytorch-image-models/timm/models/__pycache__/edgenext.cpython-39.pyc ADDED
Binary file (15.3 kB). View file
 
pytorch-image-models/timm/models/__pycache__/efficientnet.cpython-39.pyc ADDED
Binary file (84.2 kB). View file
 
pytorch-image-models/timm/models/__pycache__/gcvit.cpython-39.pyc ADDED
Binary file (19.4 kB). View file
 
pytorch-image-models/timm/models/__pycache__/ghostnet.cpython-39.pyc ADDED
Binary file (11.3 kB). View file
 
pytorch-image-models/timm/models/__pycache__/hrnet.cpython-39.pyc ADDED
Binary file (20.8 kB). View file
 
pytorch-image-models/timm/models/__pycache__/maxxvit.cpython-39.pyc ADDED
Binary file (60.8 kB). View file
 
pytorch-image-models/timm/models/__pycache__/mobilenetv3.cpython-39.pyc ADDED
Binary file (35.4 kB). View file
 
pytorch-image-models/timm/models/__pycache__/nfnet.cpython-39.pyc ADDED
Binary file (31.4 kB). View file
 
pytorch-image-models/timm/models/__pycache__/pit.cpython-39.pyc ADDED
Binary file (13 kB). View file
 
pytorch-image-models/timm/models/__pycache__/pvt_v2.cpython-39.pyc ADDED
Binary file (15.6 kB). View file
 
pytorch-image-models/timm/models/__pycache__/repvit.cpython-39.pyc ADDED
Binary file (15.8 kB). View file
 
pytorch-image-models/timm/models/__pycache__/res2net.cpython-39.pyc ADDED
Binary file (6.24 kB). View file
 
pytorch-image-models/timm/models/__pycache__/resnest.cpython-39.pyc ADDED
Binary file (7.35 kB). View file
 
pytorch-image-models/timm/models/__pycache__/resnet.cpython-39.pyc ADDED
Binary file (70.1 kB). View file
 
pytorch-image-models/timm/models/__pycache__/senet.cpython-39.pyc ADDED
Binary file (15.8 kB). View file
 
pytorch-image-models/timm/models/__pycache__/sequencer.cpython-39.pyc ADDED
Binary file (13.8 kB). View file
 
pytorch-image-models/timm/models/__pycache__/sknet.cpython-39.pyc ADDED
Binary file (7.41 kB). View file
 
pytorch-image-models/timm/models/__pycache__/swin_transformer_v2.cpython-39.pyc ADDED
Binary file (34.7 kB). View file
 
pytorch-image-models/timm/models/__pycache__/tiny_vit.cpython-39.pyc ADDED
Binary file (19.9 kB). View file
 
pytorch-image-models/timm/models/__pycache__/visformer.cpython-39.pyc ADDED
Binary file (12.1 kB). View file
 
pytorch-image-models/timm/models/__pycache__/vision_transformer.cpython-39.pyc ADDED
Binary file (101 kB). View file
 
pytorch-image-models/timm/models/__pycache__/xcit.cpython-39.pyc ADDED
Binary file (32.6 kB). View file
 
pytorch-image-models/timm/models/_pruned/ecaresnet101d_pruned.txt ADDED
@@ -0,0 +1 @@
+ conv1.0.weight:[32, 3, 3, 3]***conv1.1.weight:[32]***conv1.3.weight:[32, 32, 3, 3]***conv1.4.weight:[32]***conv1.6.weight:[64, 32, 3, 3]***bn1.weight:[64]***layer1.0.conv1.weight:[45, 64, 1, 1]***layer1.0.bn1.weight:[45]***layer1.0.conv2.weight:[25, 45, 3, 3]***layer1.0.bn2.weight:[25]***layer1.0.conv3.weight:[26, 25, 1, 1]***layer1.0.bn3.weight:[26]***layer1.0.se.conv.weight:[1, 1, 5]***layer1.0.downsample.1.weight:[26, 64, 1, 1]***layer1.0.downsample.2.weight:[26]***layer1.1.conv1.weight:[53, 26, 1, 1]***layer1.1.bn1.weight:[53]***layer1.1.conv2.weight:[20, 53, 3, 3]***layer1.1.bn2.weight:[20]***layer1.1.conv3.weight:[26, 20, 1, 1]***layer1.1.bn3.weight:[26]***layer1.1.se.conv.weight:[1, 1, 5]***layer1.2.conv1.weight:[60, 26, 1, 1]***layer1.2.bn1.weight:[60]***layer1.2.conv2.weight:[27, 60, 3, 3]***layer1.2.bn2.weight:[27]***layer1.2.conv3.weight:[26, 27, 1, 1]***layer1.2.bn3.weight:[26]***layer1.2.se.conv.weight:[1, 1, 5]***layer2.0.conv1.weight:[81, 26, 1, 1]***layer2.0.bn1.weight:[81]***layer2.0.conv2.weight:[24, 81, 3, 3]***layer2.0.bn2.weight:[24]***layer2.0.conv3.weight:[142, 24, 1, 1]***layer2.0.bn3.weight:[142]***layer2.0.se.conv.weight:[1, 1, 5]***layer2.0.downsample.1.weight:[142, 26, 1, 1]***layer2.0.downsample.2.weight:[142]***layer2.1.conv1.weight:[93, 142, 1, 1]***layer2.1.bn1.weight:[93]***layer2.1.conv2.weight:[49, 93, 3, 3]***layer2.1.bn2.weight:[49]***layer2.1.conv3.weight:[142, 49, 1, 1]***layer2.1.bn3.weight:[142]***layer2.1.se.conv.weight:[1, 1, 5]***layer2.2.conv1.weight:[102, 142, 1, 1]***layer2.2.bn1.weight:[102]***layer2.2.conv2.weight:[54, 102, 3, 3]***layer2.2.bn2.weight:[54]***layer2.2.conv3.weight:[142, 54, 1, 1]***layer2.2.bn3.weight:[142]***layer2.2.se.conv.weight:[1, 1, 5]***layer2.3.conv1.weight:[122, 142, 1, 1]***layer2.3.bn1.weight:[122]***layer2.3.conv2.weight:[78, 122, 3, 3]***layer2.3.bn2.weight:[78]***layer2.3.conv3.weight:[142, 78, 1, 1]***layer2.3.bn3.weight:[142]***layer2.3.se.conv.weight:[1, 1, 5]***layer3.0.conv1.weight:[101, 142, 1, 1]***layer3.0.bn1.weight:[101]***layer3.0.conv2.weight:[25, 101, 3, 3]***layer3.0.bn2.weight:[25]***layer3.0.conv3.weight:[278, 25, 1, 1]***layer3.0.bn3.weight:[278]***layer3.0.se.conv.weight:[1, 1, 5]***layer3.0.downsample.1.weight:[278, 142, 1, 1]***layer3.0.downsample.2.weight:[278]***layer3.1.conv1.weight:[239, 278, 1, 1]***layer3.1.bn1.weight:[239]***layer3.1.conv2.weight:[160, 239, 3, 3]***layer3.1.bn2.weight:[160]***layer3.1.conv3.weight:[278, 160, 1, 1]***layer3.1.bn3.weight:[278]***layer3.1.se.conv.weight:[1, 1, 5]***layer3.2.conv1.weight:[234, 278, 1, 1]***layer3.2.bn1.weight:[234]***layer3.2.conv2.weight:[156, 234, 3, 3]***layer3.2.bn2.weight:[156]***layer3.2.conv3.weight:[278, 156, 1, 1]***layer3.2.bn3.weight:[278]***layer3.2.se.conv.weight:[1, 1, 5]***layer3.3.conv1.weight:[250, 278, 1, 1]***layer3.3.bn1.weight:[250]***layer3.3.conv2.weight:[176, 250, 3, 3]***layer3.3.bn2.weight:[176]***layer3.3.conv3.weight:[278, 176, 1, 1]***layer3.3.bn3.weight:[278]***layer3.3.se.conv.weight:[1, 1, 5]***layer3.4.conv1.weight:[253, 278, 1, 1]***layer3.4.bn1.weight:[253]***layer3.4.conv2.weight:[191, 253, 3, 3]***layer3.4.bn2.weight:[191]***layer3.4.conv3.weight:[278, 191, 1, 1]***layer3.4.bn3.weight:[278]***layer3.4.se.conv.weight:[1, 1, 5]***layer3.5.conv1.weight:[251, 278, 1, 1]***layer3.5.bn1.weight:[251]***layer3.5.conv2.weight:[175, 251, 3, 3]***layer3.5.bn2.weight:[175]***layer3.5.conv3.weight:[278, 175, 1, 1]***layer3.5.bn3.weight:[278]***layer3.5.se.conv.weight:[1, 1, 5]***layer3.6.conv1.weight:[230, 278, 
1, 1]***layer3.6.bn1.weight:[230]***layer3.6.conv2.weight:[128, 230, 3, 3]***layer3.6.bn2.weight:[128]***layer3.6.conv3.weight:[278, 128, 1, 1]***layer3.6.bn3.weight:[278]***layer3.6.se.conv.weight:[1, 1, 5]***layer3.7.conv1.weight:[244, 278, 1, 1]***layer3.7.bn1.weight:[244]***layer3.7.conv2.weight:[154, 244, 3, 3]***layer3.7.bn2.weight:[154]***layer3.7.conv3.weight:[278, 154, 1, 1]***layer3.7.bn3.weight:[278]***layer3.7.se.conv.weight:[1, 1, 5]***layer3.8.conv1.weight:[244, 278, 1, 1]***layer3.8.bn1.weight:[244]***layer3.8.conv2.weight:[159, 244, 3, 3]***layer3.8.bn2.weight:[159]***layer3.8.conv3.weight:[278, 159, 1, 1]***layer3.8.bn3.weight:[278]***layer3.8.se.conv.weight:[1, 1, 5]***layer3.9.conv1.weight:[238, 278, 1, 1]***layer3.9.bn1.weight:[238]***layer3.9.conv2.weight:[97, 238, 3, 3]***layer3.9.bn2.weight:[97]***layer3.9.conv3.weight:[278, 97, 1, 1]***layer3.9.bn3.weight:[278]***layer3.9.se.conv.weight:[1, 1, 5]***layer3.10.conv1.weight:[244, 278, 1, 1]***layer3.10.bn1.weight:[244]***layer3.10.conv2.weight:[149, 244, 3, 3]***layer3.10.bn2.weight:[149]***layer3.10.conv3.weight:[278, 149, 1, 1]***layer3.10.bn3.weight:[278]***layer3.10.se.conv.weight:[1, 1, 5]***layer3.11.conv1.weight:[253, 278, 1, 1]***layer3.11.bn1.weight:[253]***layer3.11.conv2.weight:[181, 253, 3, 3]***layer3.11.bn2.weight:[181]***layer3.11.conv3.weight:[278, 181, 1, 1]***layer3.11.bn3.weight:[278]***layer3.11.se.conv.weight:[1, 1, 5]***layer3.12.conv1.weight:[245, 278, 1, 1]***layer3.12.bn1.weight:[245]***layer3.12.conv2.weight:[119, 245, 3, 3]***layer3.12.bn2.weight:[119]***layer3.12.conv3.weight:[278, 119, 1, 1]***layer3.12.bn3.weight:[278]***layer3.12.se.conv.weight:[1, 1, 5]***layer3.13.conv1.weight:[255, 278, 1, 1]***layer3.13.bn1.weight:[255]***layer3.13.conv2.weight:[216, 255, 3, 3]***layer3.13.bn2.weight:[216]***layer3.13.conv3.weight:[278, 216, 1, 1]***layer3.13.bn3.weight:[278]***layer3.13.se.conv.weight:[1, 1, 5]***layer3.14.conv1.weight:[256, 278, 1, 1]***layer3.14.bn1.weight:[256]***layer3.14.conv2.weight:[201, 256, 3, 3]***layer3.14.bn2.weight:[201]***layer3.14.conv3.weight:[278, 201, 1, 1]***layer3.14.bn3.weight:[278]***layer3.14.se.conv.weight:[1, 1, 5]***layer3.15.conv1.weight:[253, 278, 1, 1]***layer3.15.bn1.weight:[253]***layer3.15.conv2.weight:[149, 253, 3, 3]***layer3.15.bn2.weight:[149]***layer3.15.conv3.weight:[278, 149, 1, 1]***layer3.15.bn3.weight:[278]***layer3.15.se.conv.weight:[1, 1, 5]***layer3.16.conv1.weight:[254, 278, 1, 1]***layer3.16.bn1.weight:[254]***layer3.16.conv2.weight:[141, 254, 3, 3]***layer3.16.bn2.weight:[141]***layer3.16.conv3.weight:[278, 141, 1, 1]***layer3.16.bn3.weight:[278]***layer3.16.se.conv.weight:[1, 1, 5]***layer3.17.conv1.weight:[256, 278, 1, 1]***layer3.17.bn1.weight:[256]***layer3.17.conv2.weight:[190, 256, 3, 3]***layer3.17.bn2.weight:[190]***layer3.17.conv3.weight:[278, 190, 1, 1]***layer3.17.bn3.weight:[278]***layer3.17.se.conv.weight:[1, 1, 5]***layer3.18.conv1.weight:[256, 278, 1, 1]***layer3.18.bn1.weight:[256]***layer3.18.conv2.weight:[217, 256, 3, 3]***layer3.18.bn2.weight:[217]***layer3.18.conv3.weight:[278, 217, 1, 1]***layer3.18.bn3.weight:[278]***layer3.18.se.conv.weight:[1, 1, 5]***layer3.19.conv1.weight:[255, 278, 1, 1]***layer3.19.bn1.weight:[255]***layer3.19.conv2.weight:[156, 255, 3, 3]***layer3.19.bn2.weight:[156]***layer3.19.conv3.weight:[278, 156, 1, 1]***layer3.19.bn3.weight:[278]***layer3.19.se.conv.weight:[1, 1, 5]***layer3.20.conv1.weight:[256, 278, 1, 1]***layer3.20.bn1.weight:[256]***layer3.20.conv2.weight:[155, 256, 3, 
3]***layer3.20.bn2.weight:[155]***layer3.20.conv3.weight:[278, 155, 1, 1]***layer3.20.bn3.weight:[278]***layer3.20.se.conv.weight:[1, 1, 5]***layer3.21.conv1.weight:[256, 278, 1, 1]***layer3.21.bn1.weight:[256]***layer3.21.conv2.weight:[232, 256, 3, 3]***layer3.21.bn2.weight:[232]***layer3.21.conv3.weight:[278, 232, 1, 1]***layer3.21.bn3.weight:[278]***layer3.21.se.conv.weight:[1, 1, 5]***layer3.22.conv1.weight:[256, 278, 1, 1]***layer3.22.bn1.weight:[256]***layer3.22.conv2.weight:[214, 256, 3, 3]***layer3.22.bn2.weight:[214]***layer3.22.conv3.weight:[278, 214, 1, 1]***layer3.22.bn3.weight:[278]***layer3.22.se.conv.weight:[1, 1, 5]***layer4.0.conv1.weight:[499, 278, 1, 1]***layer4.0.bn1.weight:[499]***layer4.0.conv2.weight:[289, 499, 3, 3]***layer4.0.bn2.weight:[289]***layer4.0.conv3.weight:[2042, 289, 1, 1]***layer4.0.bn3.weight:[2042]***layer4.0.se.conv.weight:[1, 1, 7]***layer4.0.downsample.1.weight:[2042, 278, 1, 1]***layer4.0.downsample.2.weight:[2042]***layer4.1.conv1.weight:[512, 2042, 1, 1]***layer4.1.bn1.weight:[512]***layer4.1.conv2.weight:[512, 512, 3, 3]***layer4.1.bn2.weight:[512]***layer4.1.conv3.weight:[2042, 512, 1, 1]***layer4.1.bn3.weight:[2042]***layer4.1.se.conv.weight:[1, 1, 7]***layer4.2.conv1.weight:[512, 2042, 1, 1]***layer4.2.bn1.weight:[512]***layer4.2.conv2.weight:[502, 512, 3, 3]***layer4.2.bn2.weight:[502]***layer4.2.conv3.weight:[2042, 502, 1, 1]***layer4.2.bn3.weight:[2042]***layer4.2.se.conv.weight:[1, 1, 7]***fc.weight:[1000, 2042]***layer1_2_conv3_M.weight:[256, 26]***layer2_3_conv3_M.weight:[512, 142]***layer3_22_conv3_M.weight:[1024, 278]***layer4_2_conv3_M.weight:[2048, 2042]
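Each of the `_pruned/*.txt` files added in this commit packs the shape of every retained tensor into a single line of `name:[dims]` entries joined by `***`. As a rough, illustrative sketch (not the loader timm itself ships), the following parses one of these specs into a `{parameter_name: shape}` dict; the function name `parse_pruned_spec` and the relative path are assumptions made for the example.

```python
from pathlib import Path
from typing import Dict, List


def parse_pruned_spec(path: str) -> Dict[str, List[int]]:
    """Parse a pruned-spec line into {param_name: shape}.

    Entries look like ``layer1.0.conv1.weight:[45, 64, 1, 1]`` and are
    joined with ``***`` on a single line. Empty brackets (e.g.
    ``num_batches_tracked:[]``) become empty lists (scalars).
    """
    text = Path(path).read_text().strip()
    shapes: Dict[str, List[int]] = {}
    for entry in text.split("***"):
        name, _, dims = entry.partition(":")
        dims = dims.strip().strip("[]")
        shapes[name.strip()] = [int(d) for d in dims.split(",")] if dims else []
    return shapes


if __name__ == "__main__":
    # Hypothetical relative path; matches the file added just above.
    spec = parse_pruned_spec(
        "pytorch-image-models/timm/models/_pruned/ecaresnet101d_pruned.txt"
    )
    print(spec["fc.weight"])  # -> [1000, 2042]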
pytorch-image-models/timm/models/_pruned/ecaresnet50d_pruned.txt ADDED
@@ -0,0 +1 @@
+ conv1.0.weight:[32, 3, 3, 3]***conv1.1.weight:[32]***conv1.3.weight:[32, 32, 3, 3]***conv1.4.weight:[32]***conv1.6.weight:[64, 32, 3, 3]***bn1.weight:[64]***layer1.0.conv1.weight:[47, 64, 1, 1]***layer1.0.bn1.weight:[47]***layer1.0.conv2.weight:[18, 47, 3, 3]***layer1.0.bn2.weight:[18]***layer1.0.conv3.weight:[19, 18, 1, 1]***layer1.0.bn3.weight:[19]***layer1.0.se.conv.weight:[1, 1, 5]***layer1.0.downsample.1.weight:[19, 64, 1, 1]***layer1.0.downsample.2.weight:[19]***layer1.1.conv1.weight:[52, 19, 1, 1]***layer1.1.bn1.weight:[52]***layer1.1.conv2.weight:[22, 52, 3, 3]***layer1.1.bn2.weight:[22]***layer1.1.conv3.weight:[19, 22, 1, 1]***layer1.1.bn3.weight:[19]***layer1.1.se.conv.weight:[1, 1, 5]***layer1.2.conv1.weight:[64, 19, 1, 1]***layer1.2.bn1.weight:[64]***layer1.2.conv2.weight:[35, 64, 3, 3]***layer1.2.bn2.weight:[35]***layer1.2.conv3.weight:[19, 35, 1, 1]***layer1.2.bn3.weight:[19]***layer1.2.se.conv.weight:[1, 1, 5]***layer2.0.conv1.weight:[85, 19, 1, 1]***layer2.0.bn1.weight:[85]***layer2.0.conv2.weight:[37, 85, 3, 3]***layer2.0.bn2.weight:[37]***layer2.0.conv3.weight:[171, 37, 1, 1]***layer2.0.bn3.weight:[171]***layer2.0.se.conv.weight:[1, 1, 5]***layer2.0.downsample.1.weight:[171, 19, 1, 1]***layer2.0.downsample.2.weight:[171]***layer2.1.conv1.weight:[107, 171, 1, 1]***layer2.1.bn1.weight:[107]***layer2.1.conv2.weight:[80, 107, 3, 3]***layer2.1.bn2.weight:[80]***layer2.1.conv3.weight:[171, 80, 1, 1]***layer2.1.bn3.weight:[171]***layer2.1.se.conv.weight:[1, 1, 5]***layer2.2.conv1.weight:[120, 171, 1, 1]***layer2.2.bn1.weight:[120]***layer2.2.conv2.weight:[85, 120, 3, 3]***layer2.2.bn2.weight:[85]***layer2.2.conv3.weight:[171, 85, 1, 1]***layer2.2.bn3.weight:[171]***layer2.2.se.conv.weight:[1, 1, 5]***layer2.3.conv1.weight:[125, 171, 1, 1]***layer2.3.bn1.weight:[125]***layer2.3.conv2.weight:[87, 125, 3, 3]***layer2.3.bn2.weight:[87]***layer2.3.conv3.weight:[171, 87, 1, 1]***layer2.3.bn3.weight:[171]***layer2.3.se.conv.weight:[1, 1, 5]***layer3.0.conv1.weight:[198, 171, 1, 1]***layer3.0.bn1.weight:[198]***layer3.0.conv2.weight:[126, 198, 3, 3]***layer3.0.bn2.weight:[126]***layer3.0.conv3.weight:[818, 126, 1, 1]***layer3.0.bn3.weight:[818]***layer3.0.se.conv.weight:[1, 1, 5]***layer3.0.downsample.1.weight:[818, 171, 1, 1]***layer3.0.downsample.2.weight:[818]***layer3.1.conv1.weight:[255, 818, 1, 1]***layer3.1.bn1.weight:[255]***layer3.1.conv2.weight:[232, 255, 3, 3]***layer3.1.bn2.weight:[232]***layer3.1.conv3.weight:[818, 232, 1, 1]***layer3.1.bn3.weight:[818]***layer3.1.se.conv.weight:[1, 1, 5]***layer3.2.conv1.weight:[256, 818, 1, 1]***layer3.2.bn1.weight:[256]***layer3.2.conv2.weight:[233, 256, 3, 3]***layer3.2.bn2.weight:[233]***layer3.2.conv3.weight:[818, 233, 1, 1]***layer3.2.bn3.weight:[818]***layer3.2.se.conv.weight:[1, 1, 5]***layer3.3.conv1.weight:[253, 818, 1, 1]***layer3.3.bn1.weight:[253]***layer3.3.conv2.weight:[235, 253, 3, 3]***layer3.3.bn2.weight:[235]***layer3.3.conv3.weight:[818, 235, 1, 1]***layer3.3.bn3.weight:[818]***layer3.3.se.conv.weight:[1, 1, 5]***layer3.4.conv1.weight:[256, 818, 1, 1]***layer3.4.bn1.weight:[256]***layer3.4.conv2.weight:[225, 256, 3, 3]***layer3.4.bn2.weight:[225]***layer3.4.conv3.weight:[818, 225, 1, 1]***layer3.4.bn3.weight:[818]***layer3.4.se.conv.weight:[1, 1, 5]***layer3.5.conv1.weight:[256, 818, 1, 1]***layer3.5.bn1.weight:[256]***layer3.5.conv2.weight:[239, 256, 3, 3]***layer3.5.bn2.weight:[239]***layer3.5.conv3.weight:[818, 239, 1, 1]***layer3.5.bn3.weight:[818]***layer3.5.se.conv.weight:[1, 1, 5]***layer4.0.conv1.weight:[492, 
818, 1, 1]***layer4.0.bn1.weight:[492]***layer4.0.conv2.weight:[237, 492, 3, 3]***layer4.0.bn2.weight:[237]***layer4.0.conv3.weight:[2022, 237, 1, 1]***layer4.0.bn3.weight:[2022]***layer4.0.se.conv.weight:[1, 1, 7]***layer4.0.downsample.1.weight:[2022, 818, 1, 1]***layer4.0.downsample.2.weight:[2022]***layer4.1.conv1.weight:[512, 2022, 1, 1]***layer4.1.bn1.weight:[512]***layer4.1.conv2.weight:[500, 512, 3, 3]***layer4.1.bn2.weight:[500]***layer4.1.conv3.weight:[2022, 500, 1, 1]***layer4.1.bn3.weight:[2022]***layer4.1.se.conv.weight:[1, 1, 7]***layer4.2.conv1.weight:[512, 2022, 1, 1]***layer4.2.bn1.weight:[512]***layer4.2.conv2.weight:[490, 512, 3, 3]***layer4.2.bn2.weight:[490]***layer4.2.conv3.weight:[2022, 490, 1, 1]***layer4.2.bn3.weight:[2022]***layer4.2.se.conv.weight:[1, 1, 7]***fc.weight:[1000, 2022]***layer1_2_conv3_M.weight:[256, 19]***layer2_3_conv3_M.weight:[512, 171]***layer3_5_conv3_M.weight:[1024, 818]***layer4_2_conv3_M.weight:[2048, 2022]
pytorch-image-models/timm/models/_pruned/efficientnet_b2_pruned.txt ADDED
@@ -0,0 +1 @@
+ conv_stem.weight:[32, 3, 3, 3]***bn1.weight:[32]***bn1.bias:[32]***bn1.running_mean:[32]***bn1.running_var:[32]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[32, 1, 3, 3]***blocks.0.0.bn1.weight:[32]***blocks.0.0.bn1.bias:[32]***blocks.0.0.bn1.running_mean:[32]***blocks.0.0.bn1.running_var:[32]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[8, 32, 1, 1]***blocks.0.0.se.conv_reduce.bias:[8]***blocks.0.0.se.conv_expand.weight:[32, 8, 1, 1]***blocks.0.0.se.conv_expand.bias:[32]***blocks.0.0.conv_pw.weight:[16, 32, 1, 1]***blocks.0.0.bn2.weight:[16]***blocks.0.0.bn2.bias:[16]***blocks.0.0.bn2.running_mean:[16]***blocks.0.0.bn2.running_var:[16]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[16, 1, 3, 3]***blocks.0.1.bn1.weight:[16]***blocks.0.1.bn1.bias:[16]***blocks.0.1.bn1.running_mean:[16]***blocks.0.1.bn1.running_var:[16]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[4, 16, 1, 1]***blocks.0.1.se.conv_reduce.bias:[4]***blocks.0.1.se.conv_expand.weight:[16, 4, 1, 1]***blocks.0.1.se.conv_expand.bias:[16]***blocks.0.1.conv_pw.weight:[16, 16, 1, 1]***blocks.0.1.bn2.weight:[16]***blocks.0.1.bn2.bias:[16]***blocks.0.1.bn2.running_mean:[16]***blocks.0.1.bn2.running_var:[16]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[54, 16, 1, 1]***blocks.1.0.bn1.weight:[54]***blocks.1.0.bn1.bias:[54]***blocks.1.0.bn1.running_mean:[54]***blocks.1.0.bn1.running_var:[54]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[54, 1, 3, 3]***blocks.1.0.bn2.weight:[54]***blocks.1.0.bn2.bias:[54]***blocks.1.0.bn2.running_mean:[54]***blocks.1.0.bn2.running_var:[54]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[4, 54, 1, 1]***blocks.1.0.se.conv_reduce.bias:[4]***blocks.1.0.se.conv_expand.weight:[54, 4, 1, 1]***blocks.1.0.se.conv_expand.bias:[54]***blocks.1.0.conv_pwl.weight:[17, 54, 1, 1]***blocks.1.0.bn3.weight:[17]***blocks.1.0.bn3.bias:[17]***blocks.1.0.bn3.running_mean:[17]***blocks.1.0.bn3.running_var:[17]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[69, 17, 1, 1]***blocks.1.1.bn1.weight:[69]***blocks.1.1.bn1.bias:[69]***blocks.1.1.bn1.running_mean:[69]***blocks.1.1.bn1.running_var:[69]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[69, 1, 3, 3]***blocks.1.1.bn2.weight:[69]***blocks.1.1.bn2.bias:[69]***blocks.1.1.bn2.running_mean:[69]***blocks.1.1.bn2.running_var:[69]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[6, 69, 1, 1]***blocks.1.1.se.conv_reduce.bias:[6]***blocks.1.1.se.conv_expand.weight:[69, 6, 1, 1]***blocks.1.1.se.conv_expand.bias:[69]***blocks.1.1.conv_pwl.weight:[17, 69, 1, 1]***blocks.1.1.bn3.weight:[17]***blocks.1.1.bn3.bias:[17]***blocks.1.1.bn3.running_mean:[17]***blocks.1.1.bn3.running_var:[17]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[61, 17, 1, 1]***blocks.1.2.bn1.weight:[61]***blocks.1.2.bn1.bias:[61]***blocks.1.2.bn1.running_mean:[61]***blocks.1.2.bn1.running_var:[61]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[61, 1, 3, 3]***blocks.1.2.bn2.weight:[61]***blocks.1.2.bn2.bias:[61]***blocks.1.2.bn2.running_mean:[61]***blocks.1.2.bn2.running_var:[61]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[6, 61, 1, 1]***blocks.1.2.se.conv_reduce.bias:[6]***blocks.1.2.se.conv_expand.weight:[61, 6, 1, 1]***blocks.1.2.se.conv_expand.bias:[61]***blocks.1.2.conv_pwl.weight:[17, 61, 1, 
1]***blocks.1.2.bn3.weight:[17]***blocks.1.2.bn3.bias:[17]***blocks.1.2.bn3.running_mean:[17]***blocks.1.2.bn3.running_var:[17]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[86, 17, 1, 1]***blocks.2.0.bn1.weight:[86]***blocks.2.0.bn1.bias:[86]***blocks.2.0.bn1.running_mean:[86]***blocks.2.0.bn1.running_var:[86]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[86, 1, 5, 5]***blocks.2.0.bn2.weight:[86]***blocks.2.0.bn2.bias:[86]***blocks.2.0.bn2.running_mean:[86]***blocks.2.0.bn2.running_var:[86]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[6, 86, 1, 1]***blocks.2.0.se.conv_reduce.bias:[6]***blocks.2.0.se.conv_expand.weight:[86, 6, 1, 1]***blocks.2.0.se.conv_expand.bias:[86]***blocks.2.0.conv_pwl.weight:[42, 86, 1, 1]***blocks.2.0.bn3.weight:[42]***blocks.2.0.bn3.bias:[42]***blocks.2.0.bn3.running_mean:[42]***blocks.2.0.bn3.running_var:[42]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[72, 42, 1, 1]***blocks.2.1.bn1.weight:[72]***blocks.2.1.bn1.bias:[72]***blocks.2.1.bn1.running_mean:[72]***blocks.2.1.bn1.running_var:[72]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[72, 1, 5, 5]***blocks.2.1.bn2.weight:[72]***blocks.2.1.bn2.bias:[72]***blocks.2.1.bn2.running_mean:[72]***blocks.2.1.bn2.running_var:[72]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[12, 72, 1, 1]***blocks.2.1.se.conv_reduce.bias:[12]***blocks.2.1.se.conv_expand.weight:[72, 12, 1, 1]***blocks.2.1.se.conv_expand.bias:[72]***blocks.2.1.conv_pwl.weight:[42, 72, 1, 1]***blocks.2.1.bn3.weight:[42]***blocks.2.1.bn3.bias:[42]***blocks.2.1.bn3.running_mean:[42]***blocks.2.1.bn3.running_var:[42]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[98, 42, 1, 1]***blocks.2.2.bn1.weight:[98]***blocks.2.2.bn1.bias:[98]***blocks.2.2.bn1.running_mean:[98]***blocks.2.2.bn1.running_var:[98]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[98, 1, 5, 5]***blocks.2.2.bn2.weight:[98]***blocks.2.2.bn2.bias:[98]***blocks.2.2.bn2.running_mean:[98]***blocks.2.2.bn2.running_var:[98]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[12, 98, 1, 1]***blocks.2.2.se.conv_reduce.bias:[12]***blocks.2.2.se.conv_expand.weight:[98, 12, 1, 1]***blocks.2.2.se.conv_expand.bias:[98]***blocks.2.2.conv_pwl.weight:[42, 98, 1, 1]***blocks.2.2.bn3.weight:[42]***blocks.2.2.bn3.bias:[42]***blocks.2.2.bn3.running_mean:[42]***blocks.2.2.bn3.running_var:[42]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[245, 42, 1, 1]***blocks.3.0.bn1.weight:[245]***blocks.3.0.bn1.bias:[245]***blocks.3.0.bn1.running_mean:[245]***blocks.3.0.bn1.running_var:[245]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[245, 1, 3, 3]***blocks.3.0.bn2.weight:[245]***blocks.3.0.bn2.bias:[245]***blocks.3.0.bn2.running_mean:[245]***blocks.3.0.bn2.running_var:[245]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[12, 245, 1, 1]***blocks.3.0.se.conv_reduce.bias:[12]***blocks.3.0.se.conv_expand.weight:[245, 12, 1, 1]***blocks.3.0.se.conv_expand.bias:[245]***blocks.3.0.conv_pwl.weight:[85, 245, 1, 1]***blocks.3.0.bn3.weight:[85]***blocks.3.0.bn3.bias:[85]***blocks.3.0.bn3.running_mean:[85]***blocks.3.0.bn3.running_var:[85]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[274, 85, 1, 
1]***blocks.3.1.bn1.weight:[274]***blocks.3.1.bn1.bias:[274]***blocks.3.1.bn1.running_mean:[274]***blocks.3.1.bn1.running_var:[274]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[274, 1, 3, 3]***blocks.3.1.bn2.weight:[274]***blocks.3.1.bn2.bias:[274]***blocks.3.1.bn2.running_mean:[274]***blocks.3.1.bn2.running_var:[274]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[22, 274, 1, 1]***blocks.3.1.se.conv_reduce.bias:[22]***blocks.3.1.se.conv_expand.weight:[274, 22, 1, 1]***blocks.3.1.se.conv_expand.bias:[274]***blocks.3.1.conv_pwl.weight:[85, 274, 1, 1]***blocks.3.1.bn3.weight:[85]***blocks.3.1.bn3.bias:[85]***blocks.3.1.bn3.running_mean:[85]***blocks.3.1.bn3.running_var:[85]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[254, 85, 1, 1]***blocks.3.2.bn1.weight:[254]***blocks.3.2.bn1.bias:[254]***blocks.3.2.bn1.running_mean:[254]***blocks.3.2.bn1.running_var:[254]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[254, 1, 3, 3]***blocks.3.2.bn2.weight:[254]***blocks.3.2.bn2.bias:[254]***blocks.3.2.bn2.running_mean:[254]***blocks.3.2.bn2.running_var:[254]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[22, 254, 1, 1]***blocks.3.2.se.conv_reduce.bias:[22]***blocks.3.2.se.conv_expand.weight:[254, 22, 1, 1]***blocks.3.2.se.conv_expand.bias:[254]***blocks.3.2.conv_pwl.weight:[85, 254, 1, 1]***blocks.3.2.bn3.weight:[85]***blocks.3.2.bn3.bias:[85]***blocks.3.2.bn3.running_mean:[85]***blocks.3.2.bn3.running_var:[85]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[292, 85, 1, 1]***blocks.3.3.bn1.weight:[292]***blocks.3.3.bn1.bias:[292]***blocks.3.3.bn1.running_mean:[292]***blocks.3.3.bn1.running_var:[292]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[292, 1, 3, 3]***blocks.3.3.bn2.weight:[292]***blocks.3.3.bn2.bias:[292]***blocks.3.3.bn2.running_mean:[292]***blocks.3.3.bn2.running_var:[292]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[22, 292, 1, 1]***blocks.3.3.se.conv_reduce.bias:[22]***blocks.3.3.se.conv_expand.weight:[292, 22, 1, 1]***blocks.3.3.se.conv_expand.bias:[292]***blocks.3.3.conv_pwl.weight:[85, 292, 1, 1]***blocks.3.3.bn3.weight:[85]***blocks.3.3.bn3.bias:[85]***blocks.3.3.bn3.running_mean:[85]***blocks.3.3.bn3.running_var:[85]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[502, 85, 1, 1]***blocks.4.0.bn1.weight:[502]***blocks.4.0.bn1.bias:[502]***blocks.4.0.bn1.running_mean:[502]***blocks.4.0.bn1.running_var:[502]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[502, 1, 5, 5]***blocks.4.0.bn2.weight:[502]***blocks.4.0.bn2.bias:[502]***blocks.4.0.bn2.running_mean:[502]***blocks.4.0.bn2.running_var:[502]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[22, 502, 1, 1]***blocks.4.0.se.conv_reduce.bias:[22]***blocks.4.0.se.conv_expand.weight:[502, 22, 1, 1]***blocks.4.0.se.conv_expand.bias:[502]***blocks.4.0.conv_pwl.weight:[116, 502, 1, 1]***blocks.4.0.bn3.weight:[116]***blocks.4.0.bn3.bias:[116]***blocks.4.0.bn3.running_mean:[116]***blocks.4.0.bn3.running_var:[116]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[315, 116, 1, 1]***blocks.4.1.bn1.weight:[315]***blocks.4.1.bn1.bias:[315]***blocks.4.1.bn1.running_mean:[315]***blocks.4.1.bn1.running_var:[315]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[315, 1, 5, 
5]***blocks.4.1.bn2.weight:[315]***blocks.4.1.bn2.bias:[315]***blocks.4.1.bn2.running_mean:[315]***blocks.4.1.bn2.running_var:[315]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[30, 315, 1, 1]***blocks.4.1.se.conv_reduce.bias:[30]***blocks.4.1.se.conv_expand.weight:[315, 30, 1, 1]***blocks.4.1.se.conv_expand.bias:[315]***blocks.4.1.conv_pwl.weight:[116, 315, 1, 1]***blocks.4.1.bn3.weight:[116]***blocks.4.1.bn3.bias:[116]***blocks.4.1.bn3.running_mean:[116]***blocks.4.1.bn3.running_var:[116]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[354, 116, 1, 1]***blocks.4.2.bn1.weight:[354]***blocks.4.2.bn1.bias:[354]***blocks.4.2.bn1.running_mean:[354]***blocks.4.2.bn1.running_var:[354]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[354, 1, 5, 5]***blocks.4.2.bn2.weight:[354]***blocks.4.2.bn2.bias:[354]***blocks.4.2.bn2.running_mean:[354]***blocks.4.2.bn2.running_var:[354]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[30, 354, 1, 1]***blocks.4.2.se.conv_reduce.bias:[30]***blocks.4.2.se.conv_expand.weight:[354, 30, 1, 1]***blocks.4.2.se.conv_expand.bias:[354]***blocks.4.2.conv_pwl.weight:[116, 354, 1, 1]***blocks.4.2.bn3.weight:[116]***blocks.4.2.bn3.bias:[116]***blocks.4.2.bn3.running_mean:[116]***blocks.4.2.bn3.running_var:[116]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[443, 116, 1, 1]***blocks.4.3.bn1.weight:[443]***blocks.4.3.bn1.bias:[443]***blocks.4.3.bn1.running_mean:[443]***blocks.4.3.bn1.running_var:[443]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[443, 1, 5, 5]***blocks.4.3.bn2.weight:[443]***blocks.4.3.bn2.bias:[443]***blocks.4.3.bn2.running_mean:[443]***blocks.4.3.bn2.running_var:[443]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[30, 443, 1, 1]***blocks.4.3.se.conv_reduce.bias:[30]***blocks.4.3.se.conv_expand.weight:[443, 30, 1, 1]***blocks.4.3.se.conv_expand.bias:[443]***blocks.4.3.conv_pwl.weight:[116, 443, 1, 1]***blocks.4.3.bn3.weight:[116]***blocks.4.3.bn3.bias:[116]***blocks.4.3.bn3.running_mean:[116]***blocks.4.3.bn3.running_var:[116]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[719, 116, 1, 1]***blocks.5.0.bn1.weight:[719]***blocks.5.0.bn1.bias:[719]***blocks.5.0.bn1.running_mean:[719]***blocks.5.0.bn1.running_var:[719]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[719, 1, 5, 5]***blocks.5.0.bn2.weight:[719]***blocks.5.0.bn2.bias:[719]***blocks.5.0.bn2.running_mean:[719]***blocks.5.0.bn2.running_var:[719]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[30, 719, 1, 1]***blocks.5.0.se.conv_reduce.bias:[30]***blocks.5.0.se.conv_expand.weight:[719, 30, 1, 1]***blocks.5.0.se.conv_expand.bias:[719]***blocks.5.0.conv_pwl.weight:[208, 719, 1, 1]***blocks.5.0.bn3.weight:[208]***blocks.5.0.bn3.bias:[208]***blocks.5.0.bn3.running_mean:[208]***blocks.5.0.bn3.running_var:[208]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[1148, 208, 1, 1]***blocks.5.1.bn1.weight:[1148]***blocks.5.1.bn1.bias:[1148]***blocks.5.1.bn1.running_mean:[1148]***blocks.5.1.bn1.running_var:[1148]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[1148, 1, 5, 5]***blocks.5.1.bn2.weight:[1148]***blocks.5.1.bn2.bias:[1148]***blocks.5.1.bn2.running_mean:[1148]***blocks.5.1.bn2.running_var:[1148]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[52, 1148, 1, 
1]***blocks.5.1.se.conv_reduce.bias:[52]***blocks.5.1.se.conv_expand.weight:[1148, 52, 1, 1]***blocks.5.1.se.conv_expand.bias:[1148]***blocks.5.1.conv_pwl.weight:[208, 1148, 1, 1]***blocks.5.1.bn3.weight:[208]***blocks.5.1.bn3.bias:[208]***blocks.5.1.bn3.running_mean:[208]***blocks.5.1.bn3.running_var:[208]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[1160, 208, 1, 1]***blocks.5.2.bn1.weight:[1160]***blocks.5.2.bn1.bias:[1160]***blocks.5.2.bn1.running_mean:[1160]***blocks.5.2.bn1.running_var:[1160]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[1160, 1, 5, 5]***blocks.5.2.bn2.weight:[1160]***blocks.5.2.bn2.bias:[1160]***blocks.5.2.bn2.running_mean:[1160]***blocks.5.2.bn2.running_var:[1160]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[52, 1160, 1, 1]***blocks.5.2.se.conv_reduce.bias:[52]***blocks.5.2.se.conv_expand.weight:[1160, 52, 1, 1]***blocks.5.2.se.conv_expand.bias:[1160]***blocks.5.2.conv_pwl.weight:[208, 1160, 1, 1]***blocks.5.2.bn3.weight:[208]***blocks.5.2.bn3.bias:[208]***blocks.5.2.bn3.running_mean:[208]***blocks.5.2.bn3.running_var:[208]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[1182, 208, 1, 1]***blocks.5.3.bn1.weight:[1182]***blocks.5.3.bn1.bias:[1182]***blocks.5.3.bn1.running_mean:[1182]***blocks.5.3.bn1.running_var:[1182]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[1182, 1, 5, 5]***blocks.5.3.bn2.weight:[1182]***blocks.5.3.bn2.bias:[1182]***blocks.5.3.bn2.running_mean:[1182]***blocks.5.3.bn2.running_var:[1182]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[52, 1182, 1, 1]***blocks.5.3.se.conv_reduce.bias:[52]***blocks.5.3.se.conv_expand.weight:[1182, 52, 1, 1]***blocks.5.3.se.conv_expand.bias:[1182]***blocks.5.3.conv_pwl.weight:[208, 1182, 1, 1]***blocks.5.3.bn3.weight:[208]***blocks.5.3.bn3.bias:[208]***blocks.5.3.bn3.running_mean:[208]***blocks.5.3.bn3.running_var:[208]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[1228, 208, 1, 1]***blocks.5.4.bn1.weight:[1228]***blocks.5.4.bn1.bias:[1228]***blocks.5.4.bn1.running_mean:[1228]***blocks.5.4.bn1.running_var:[1228]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[1228, 1, 5, 5]***blocks.5.4.bn2.weight:[1228]***blocks.5.4.bn2.bias:[1228]***blocks.5.4.bn2.running_mean:[1228]***blocks.5.4.bn2.running_var:[1228]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[52, 1228, 1, 1]***blocks.5.4.se.conv_reduce.bias:[52]***blocks.5.4.se.conv_expand.weight:[1228, 52, 1, 1]***blocks.5.4.se.conv_expand.bias:[1228]***blocks.5.4.conv_pwl.weight:[208, 1228, 1, 1]***blocks.5.4.bn3.weight:[208]***blocks.5.4.bn3.bias:[208]***blocks.5.4.bn3.running_mean:[208]***blocks.5.4.bn3.running_var:[208]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1248, 208, 1, 1]***blocks.6.0.bn1.weight:[1248]***blocks.6.0.bn1.bias:[1248]***blocks.6.0.bn1.running_mean:[1248]***blocks.6.0.bn1.running_var:[1248]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1248, 1, 3, 3]***blocks.6.0.bn2.weight:[1248]***blocks.6.0.bn2.bias:[1248]***blocks.6.0.bn2.running_mean:[1248]***blocks.6.0.bn2.running_var:[1248]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[52, 1248, 1, 1]***blocks.6.0.se.conv_reduce.bias:[52]***blocks.6.0.se.conv_expand.weight:[1248, 52, 1, 1]***blocks.6.0.se.conv_expand.bias:[1248]***blocks.6.0.conv_pwl.weight:[352, 1248, 1, 
1]***blocks.6.0.bn3.weight:[352]***blocks.6.0.bn3.bias:[352]***blocks.6.0.bn3.running_mean:[352]***blocks.6.0.bn3.running_var:[352]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[2112, 352, 1, 1]***blocks.6.1.bn1.weight:[2112]***blocks.6.1.bn1.bias:[2112]***blocks.6.1.bn1.running_mean:[2112]***blocks.6.1.bn1.running_var:[2112]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[2112, 1, 3, 3]***blocks.6.1.bn2.weight:[2112]***blocks.6.1.bn2.bias:[2112]***blocks.6.1.bn2.running_mean:[2112]***blocks.6.1.bn2.running_var:[2112]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[88, 2112, 1, 1]***blocks.6.1.se.conv_reduce.bias:[88]***blocks.6.1.se.conv_expand.weight:[2112, 88, 1, 1]***blocks.6.1.se.conv_expand.bias:[2112]***blocks.6.1.conv_pwl.weight:[352, 2112, 1, 1]***blocks.6.1.bn3.weight:[352]***blocks.6.1.bn3.bias:[352]***blocks.6.1.bn3.running_mean:[352]***blocks.6.1.bn3.running_var:[352]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1408, 352, 1, 1]***bn2.weight:[1408]***bn2.bias:[1408]***bn2.running_mean:[1408]***bn2.running_var:[1408]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1408]***classifier.bias:[1000]
pytorch-image-models/timm/models/_pruned/efficientnet_b3_pruned.txt ADDED
@@ -0,0 +1 @@
+ conv_stem.weight:[40, 3, 3, 3]***bn1.weight:[40]***bn1.bias:[40]***bn1.running_mean:[40]***bn1.running_var:[40]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[40, 1, 3, 3]***blocks.0.0.bn1.weight:[40]***blocks.0.0.bn1.bias:[40]***blocks.0.0.bn1.running_mean:[40]***blocks.0.0.bn1.running_var:[40]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[10, 40, 1, 1]***blocks.0.0.se.conv_reduce.bias:[10]***blocks.0.0.se.conv_expand.weight:[40, 10, 1, 1]***blocks.0.0.se.conv_expand.bias:[40]***blocks.0.0.conv_pw.weight:[24, 40, 1, 1]***blocks.0.0.bn2.weight:[24]***blocks.0.0.bn2.bias:[24]***blocks.0.0.bn2.running_mean:[24]***blocks.0.0.bn2.running_var:[24]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[24, 1, 3, 3]***blocks.0.1.bn1.weight:[24]***blocks.0.1.bn1.bias:[24]***blocks.0.1.bn1.running_mean:[24]***blocks.0.1.bn1.running_var:[24]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[6, 24, 1, 1]***blocks.0.1.se.conv_reduce.bias:[6]***blocks.0.1.se.conv_expand.weight:[24, 6, 1, 1]***blocks.0.1.se.conv_expand.bias:[24]***blocks.0.1.conv_pw.weight:[24, 24, 1, 1]***blocks.0.1.bn2.weight:[24]***blocks.0.1.bn2.bias:[24]***blocks.0.1.bn2.running_mean:[24]***blocks.0.1.bn2.running_var:[24]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[27, 24, 1, 1]***blocks.1.0.bn1.weight:[27]***blocks.1.0.bn1.bias:[27]***blocks.1.0.bn1.running_mean:[27]***blocks.1.0.bn1.running_var:[27]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[27, 1, 3, 3]***blocks.1.0.bn2.weight:[27]***blocks.1.0.bn2.bias:[27]***blocks.1.0.bn2.running_mean:[27]***blocks.1.0.bn2.running_var:[27]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[6, 27, 1, 1]***blocks.1.0.se.conv_reduce.bias:[6]***blocks.1.0.se.conv_expand.weight:[27, 6, 1, 1]***blocks.1.0.se.conv_expand.bias:[27]***blocks.1.0.conv_pwl.weight:[12, 27, 1, 1]***blocks.1.0.bn3.weight:[12]***blocks.1.0.bn3.bias:[12]***blocks.1.0.bn3.running_mean:[12]***blocks.1.0.bn3.running_var:[12]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[49, 12, 1, 1]***blocks.1.1.bn1.weight:[49]***blocks.1.1.bn1.bias:[49]***blocks.1.1.bn1.running_mean:[49]***blocks.1.1.bn1.running_var:[49]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[49, 1, 3, 3]***blocks.1.1.bn2.weight:[49]***blocks.1.1.bn2.bias:[49]***blocks.1.1.bn2.running_mean:[49]***blocks.1.1.bn2.running_var:[49]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[8, 49, 1, 1]***blocks.1.1.se.conv_reduce.bias:[8]***blocks.1.1.se.conv_expand.weight:[49, 8, 1, 1]***blocks.1.1.se.conv_expand.bias:[49]***blocks.1.1.conv_pwl.weight:[12, 49, 1, 1]***blocks.1.1.bn3.weight:[12]***blocks.1.1.bn3.bias:[12]***blocks.1.1.bn3.running_mean:[12]***blocks.1.1.bn3.running_var:[12]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[48, 12, 1, 1]***blocks.1.2.bn1.weight:[48]***blocks.1.2.bn1.bias:[48]***blocks.1.2.bn1.running_mean:[48]***blocks.1.2.bn1.running_var:[48]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[48, 1, 3, 3]***blocks.1.2.bn2.weight:[48]***blocks.1.2.bn2.bias:[48]***blocks.1.2.bn2.running_mean:[48]***blocks.1.2.bn2.running_var:[48]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[8, 48, 1, 1]***blocks.1.2.se.conv_reduce.bias:[8]***blocks.1.2.se.conv_expand.weight:[48, 8, 1, 1]***blocks.1.2.se.conv_expand.bias:[48]***blocks.1.2.conv_pwl.weight:[12, 48, 1, 
1]***blocks.1.2.bn3.weight:[12]***blocks.1.2.bn3.bias:[12]***blocks.1.2.bn3.running_mean:[12]***blocks.1.2.bn3.running_var:[12]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[83, 12, 1, 1]***blocks.2.0.bn1.weight:[83]***blocks.2.0.bn1.bias:[83]***blocks.2.0.bn1.running_mean:[83]***blocks.2.0.bn1.running_var:[83]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[83, 1, 5, 5]***blocks.2.0.bn2.weight:[83]***blocks.2.0.bn2.bias:[83]***blocks.2.0.bn2.running_mean:[83]***blocks.2.0.bn2.running_var:[83]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[8, 83, 1, 1]***blocks.2.0.se.conv_reduce.bias:[8]***blocks.2.0.se.conv_expand.weight:[83, 8, 1, 1]***blocks.2.0.se.conv_expand.bias:[83]***blocks.2.0.conv_pwl.weight:[40, 83, 1, 1]***blocks.2.0.bn3.weight:[40]***blocks.2.0.bn3.bias:[40]***blocks.2.0.bn3.running_mean:[40]***blocks.2.0.bn3.running_var:[40]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[90, 40, 1, 1]***blocks.2.1.bn1.weight:[90]***blocks.2.1.bn1.bias:[90]***blocks.2.1.bn1.running_mean:[90]***blocks.2.1.bn1.running_var:[90]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[90, 1, 5, 5]***blocks.2.1.bn2.weight:[90]***blocks.2.1.bn2.bias:[90]***blocks.2.1.bn2.running_mean:[90]***blocks.2.1.bn2.running_var:[90]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[12, 90, 1, 1]***blocks.2.1.se.conv_reduce.bias:[12]***blocks.2.1.se.conv_expand.weight:[90, 12, 1, 1]***blocks.2.1.se.conv_expand.bias:[90]***blocks.2.1.conv_pwl.weight:[40, 90, 1, 1]***blocks.2.1.bn3.weight:[40]***blocks.2.1.bn3.bias:[40]***blocks.2.1.bn3.running_mean:[40]***blocks.2.1.bn3.running_var:[40]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[85, 40, 1, 1]***blocks.2.2.bn1.weight:[85]***blocks.2.2.bn1.bias:[85]***blocks.2.2.bn1.running_mean:[85]***blocks.2.2.bn1.running_var:[85]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[85, 1, 5, 5]***blocks.2.2.bn2.weight:[85]***blocks.2.2.bn2.bias:[85]***blocks.2.2.bn2.running_mean:[85]***blocks.2.2.bn2.running_var:[85]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[12, 85, 1, 1]***blocks.2.2.se.conv_reduce.bias:[12]***blocks.2.2.se.conv_expand.weight:[85, 12, 1, 1]***blocks.2.2.se.conv_expand.bias:[85]***blocks.2.2.conv_pwl.weight:[40, 85, 1, 1]***blocks.2.2.bn3.weight:[40]***blocks.2.2.bn3.bias:[40]***blocks.2.2.bn3.running_mean:[40]***blocks.2.2.bn3.running_var:[40]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[215, 40, 1, 1]***blocks.3.0.bn1.weight:[215]***blocks.3.0.bn1.bias:[215]***blocks.3.0.bn1.running_mean:[215]***blocks.3.0.bn1.running_var:[215]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[215, 1, 3, 3]***blocks.3.0.bn2.weight:[215]***blocks.3.0.bn2.bias:[215]***blocks.3.0.bn2.running_mean:[215]***blocks.3.0.bn2.running_var:[215]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[12, 215, 1, 1]***blocks.3.0.se.conv_reduce.bias:[12]***blocks.3.0.se.conv_expand.weight:[215, 12, 1, 1]***blocks.3.0.se.conv_expand.bias:[215]***blocks.3.0.conv_pwl.weight:[93, 215, 1, 1]***blocks.3.0.bn3.weight:[93]***blocks.3.0.bn3.bias:[93]***blocks.3.0.bn3.running_mean:[93]***blocks.3.0.bn3.running_var:[93]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[261, 93, 1, 
1]***blocks.3.1.bn1.weight:[261]***blocks.3.1.bn1.bias:[261]***blocks.3.1.bn1.running_mean:[261]***blocks.3.1.bn1.running_var:[261]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[261, 1, 3, 3]***blocks.3.1.bn2.weight:[261]***blocks.3.1.bn2.bias:[261]***blocks.3.1.bn2.running_mean:[261]***blocks.3.1.bn2.running_var:[261]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[24, 261, 1, 1]***blocks.3.1.se.conv_reduce.bias:[24]***blocks.3.1.se.conv_expand.weight:[261, 24, 1, 1]***blocks.3.1.se.conv_expand.bias:[261]***blocks.3.1.conv_pwl.weight:[93, 261, 1, 1]***blocks.3.1.bn3.weight:[93]***blocks.3.1.bn3.bias:[93]***blocks.3.1.bn3.running_mean:[93]***blocks.3.1.bn3.running_var:[93]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[219, 93, 1, 1]***blocks.3.2.bn1.weight:[219]***blocks.3.2.bn1.bias:[219]***blocks.3.2.bn1.running_mean:[219]***blocks.3.2.bn1.running_var:[219]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[219, 1, 3, 3]***blocks.3.2.bn2.weight:[219]***blocks.3.2.bn2.bias:[219]***blocks.3.2.bn2.running_mean:[219]***blocks.3.2.bn2.running_var:[219]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[24, 219, 1, 1]***blocks.3.2.se.conv_reduce.bias:[24]***blocks.3.2.se.conv_expand.weight:[219, 24, 1, 1]***blocks.3.2.se.conv_expand.bias:[219]***blocks.3.2.conv_pwl.weight:[93, 219, 1, 1]***blocks.3.2.bn3.weight:[93]***blocks.3.2.bn3.bias:[93]***blocks.3.2.bn3.running_mean:[93]***blocks.3.2.bn3.running_var:[93]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[254, 93, 1, 1]***blocks.3.3.bn1.weight:[254]***blocks.3.3.bn1.bias:[254]***blocks.3.3.bn1.running_mean:[254]***blocks.3.3.bn1.running_var:[254]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[254, 1, 3, 3]***blocks.3.3.bn2.weight:[254]***blocks.3.3.bn2.bias:[254]***blocks.3.3.bn2.running_mean:[254]***blocks.3.3.bn2.running_var:[254]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[24, 254, 1, 1]***blocks.3.3.se.conv_reduce.bias:[24]***blocks.3.3.se.conv_expand.weight:[254, 24, 1, 1]***blocks.3.3.se.conv_expand.bias:[254]***blocks.3.3.conv_pwl.weight:[93, 254, 1, 1]***blocks.3.3.bn3.weight:[93]***blocks.3.3.bn3.bias:[93]***blocks.3.3.bn3.running_mean:[93]***blocks.3.3.bn3.running_var:[93]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.3.4.conv_pw.weight:[236, 93, 1, 1]***blocks.3.4.bn1.weight:[236]***blocks.3.4.bn1.bias:[236]***blocks.3.4.bn1.running_mean:[236]***blocks.3.4.bn1.running_var:[236]***blocks.3.4.bn1.num_batches_tracked:[]***blocks.3.4.conv_dw.weight:[236, 1, 3, 3]***blocks.3.4.bn2.weight:[236]***blocks.3.4.bn2.bias:[236]***blocks.3.4.bn2.running_mean:[236]***blocks.3.4.bn2.running_var:[236]***blocks.3.4.bn2.num_batches_tracked:[]***blocks.3.4.se.conv_reduce.weight:[24, 236, 1, 1]***blocks.3.4.se.conv_reduce.bias:[24]***blocks.3.4.se.conv_expand.weight:[236, 24, 1, 1]***blocks.3.4.se.conv_expand.bias:[236]***blocks.3.4.conv_pwl.weight:[93, 236, 1, 1]***blocks.3.4.bn3.weight:[93]***blocks.3.4.bn3.bias:[93]***blocks.3.4.bn3.running_mean:[93]***blocks.3.4.bn3.running_var:[93]***blocks.3.4.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[480, 93, 1, 1]***blocks.4.0.bn1.weight:[480]***blocks.4.0.bn1.bias:[480]***blocks.4.0.bn1.running_mean:[480]***blocks.4.0.bn1.running_var:[480]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[480, 1, 5, 
5]***blocks.4.0.bn2.weight:[480]***blocks.4.0.bn2.bias:[480]***blocks.4.0.bn2.running_mean:[480]***blocks.4.0.bn2.running_var:[480]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[24, 480, 1, 1]***blocks.4.0.se.conv_reduce.bias:[24]***blocks.4.0.se.conv_expand.weight:[480, 24, 1, 1]***blocks.4.0.se.conv_expand.bias:[480]***blocks.4.0.conv_pwl.weight:[120, 480, 1, 1]***blocks.4.0.bn3.weight:[120]***blocks.4.0.bn3.bias:[120]***blocks.4.0.bn3.running_mean:[120]***blocks.4.0.bn3.running_var:[120]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[235, 120, 1, 1]***blocks.4.1.bn1.weight:[235]***blocks.4.1.bn1.bias:[235]***blocks.4.1.bn1.running_mean:[235]***blocks.4.1.bn1.running_var:[235]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[235, 1, 5, 5]***blocks.4.1.bn2.weight:[235]***blocks.4.1.bn2.bias:[235]***blocks.4.1.bn2.running_mean:[235]***blocks.4.1.bn2.running_var:[235]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[34, 235, 1, 1]***blocks.4.1.se.conv_reduce.bias:[34]***blocks.4.1.se.conv_expand.weight:[235, 34, 1, 1]***blocks.4.1.se.conv_expand.bias:[235]***blocks.4.1.conv_pwl.weight:[120, 235, 1, 1]***blocks.4.1.bn3.weight:[120]***blocks.4.1.bn3.bias:[120]***blocks.4.1.bn3.running_mean:[120]***blocks.4.1.bn3.running_var:[120]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[217, 120, 1, 1]***blocks.4.2.bn1.weight:[217]***blocks.4.2.bn1.bias:[217]***blocks.4.2.bn1.running_mean:[217]***blocks.4.2.bn1.running_var:[217]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[217, 1, 5, 5]***blocks.4.2.bn2.weight:[217]***blocks.4.2.bn2.bias:[217]***blocks.4.2.bn2.running_mean:[217]***blocks.4.2.bn2.running_var:[217]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[34, 217, 1, 1]***blocks.4.2.se.conv_reduce.bias:[34]***blocks.4.2.se.conv_expand.weight:[217, 34, 1, 1]***blocks.4.2.se.conv_expand.bias:[217]***blocks.4.2.conv_pwl.weight:[120, 217, 1, 1]***blocks.4.2.bn3.weight:[120]***blocks.4.2.bn3.bias:[120]***blocks.4.2.bn3.running_mean:[120]***blocks.4.2.bn3.running_var:[120]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[226, 120, 1, 1]***blocks.4.3.bn1.weight:[226]***blocks.4.3.bn1.bias:[226]***blocks.4.3.bn1.running_mean:[226]***blocks.4.3.bn1.running_var:[226]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[226, 1, 5, 5]***blocks.4.3.bn2.weight:[226]***blocks.4.3.bn2.bias:[226]***blocks.4.3.bn2.running_mean:[226]***blocks.4.3.bn2.running_var:[226]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[33, 226, 1, 1]***blocks.4.3.se.conv_reduce.bias:[33]***blocks.4.3.se.conv_expand.weight:[226, 33, 1, 1]***blocks.4.3.se.conv_expand.bias:[226]***blocks.4.3.conv_pwl.weight:[120, 226, 1, 1]***blocks.4.3.bn3.weight:[120]***blocks.4.3.bn3.bias:[120]***blocks.4.3.bn3.running_mean:[120]***blocks.4.3.bn3.running_var:[120]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.4.4.conv_pw.weight:[340, 120, 1, 1]***blocks.4.4.bn1.weight:[340]***blocks.4.4.bn1.bias:[340]***blocks.4.4.bn1.running_mean:[340]***blocks.4.4.bn1.running_var:[340]***blocks.4.4.bn1.num_batches_tracked:[]***blocks.4.4.conv_dw.weight:[340, 1, 5, 5]***blocks.4.4.bn2.weight:[340]***blocks.4.4.bn2.bias:[340]***blocks.4.4.bn2.running_mean:[340]***blocks.4.4.bn2.running_var:[340]***blocks.4.4.bn2.num_batches_tracked:[]***blocks.4.4.se.conv_reduce.weight:[34, 340, 1, 
1]***blocks.4.4.se.conv_reduce.bias:[34]***blocks.4.4.se.conv_expand.weight:[340, 34, 1, 1]***blocks.4.4.se.conv_expand.bias:[340]***blocks.4.4.conv_pwl.weight:[120, 340, 1, 1]***blocks.4.4.bn3.weight:[120]***blocks.4.4.bn3.bias:[120]***blocks.4.4.bn3.running_mean:[120]***blocks.4.4.bn3.running_var:[120]***blocks.4.4.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[802, 120, 1, 1]***blocks.5.0.bn1.weight:[802]***blocks.5.0.bn1.bias:[802]***blocks.5.0.bn1.running_mean:[802]***blocks.5.0.bn1.running_var:[802]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[802, 1, 5, 5]***blocks.5.0.bn2.weight:[802]***blocks.5.0.bn2.bias:[802]***blocks.5.0.bn2.running_mean:[802]***blocks.5.0.bn2.running_var:[802]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[34, 802, 1, 1]***blocks.5.0.se.conv_reduce.bias:[34]***blocks.5.0.se.conv_expand.weight:[802, 34, 1, 1]***blocks.5.0.se.conv_expand.bias:[802]***blocks.5.0.conv_pwl.weight:[232, 802, 1, 1]***blocks.5.0.bn3.weight:[232]***blocks.5.0.bn3.bias:[232]***blocks.5.0.bn3.running_mean:[232]***blocks.5.0.bn3.running_var:[232]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[1030, 232, 1, 1]***blocks.5.1.bn1.weight:[1030]***blocks.5.1.bn1.bias:[1030]***blocks.5.1.bn1.running_mean:[1030]***blocks.5.1.bn1.running_var:[1030]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[1030, 1, 5, 5]***blocks.5.1.bn2.weight:[1030]***blocks.5.1.bn2.bias:[1030]***blocks.5.1.bn2.running_mean:[1030]***blocks.5.1.bn2.running_var:[1030]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[58, 1030, 1, 1]***blocks.5.1.se.conv_reduce.bias:[58]***blocks.5.1.se.conv_expand.weight:[1030, 58, 1, 1]***blocks.5.1.se.conv_expand.bias:[1030]***blocks.5.1.conv_pwl.weight:[232, 1030, 1, 1]***blocks.5.1.bn3.weight:[232]***blocks.5.1.bn3.bias:[232]***blocks.5.1.bn3.running_mean:[232]***blocks.5.1.bn3.running_var:[232]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[924, 232, 1, 1]***blocks.5.2.bn1.weight:[924]***blocks.5.2.bn1.bias:[924]***blocks.5.2.bn1.running_mean:[924]***blocks.5.2.bn1.running_var:[924]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[924, 1, 5, 5]***blocks.5.2.bn2.weight:[924]***blocks.5.2.bn2.bias:[924]***blocks.5.2.bn2.running_mean:[924]***blocks.5.2.bn2.running_var:[924]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[58, 924, 1, 1]***blocks.5.2.se.conv_reduce.bias:[58]***blocks.5.2.se.conv_expand.weight:[924, 58, 1, 1]***blocks.5.2.se.conv_expand.bias:[924]***blocks.5.2.conv_pwl.weight:[232, 924, 1, 1]***blocks.5.2.bn3.weight:[232]***blocks.5.2.bn3.bias:[232]***blocks.5.2.bn3.running_mean:[232]***blocks.5.2.bn3.running_var:[232]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[1016, 232, 1, 1]***blocks.5.3.bn1.weight:[1016]***blocks.5.3.bn1.bias:[1016]***blocks.5.3.bn1.running_mean:[1016]***blocks.5.3.bn1.running_var:[1016]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[1016, 1, 5, 5]***blocks.5.3.bn2.weight:[1016]***blocks.5.3.bn2.bias:[1016]***blocks.5.3.bn2.running_mean:[1016]***blocks.5.3.bn2.running_var:[1016]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[58, 1016, 1, 1]***blocks.5.3.se.conv_reduce.bias:[58]***blocks.5.3.se.conv_expand.weight:[1016, 58, 1, 1]***blocks.5.3.se.conv_expand.bias:[1016]***blocks.5.3.conv_pwl.weight:[232, 1016, 1, 
1]***blocks.5.3.bn3.weight:[232]***blocks.5.3.bn3.bias:[232]***blocks.5.3.bn3.running_mean:[232]***blocks.5.3.bn3.running_var:[232]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[1130, 232, 1, 1]***blocks.5.4.bn1.weight:[1130]***blocks.5.4.bn1.bias:[1130]***blocks.5.4.bn1.running_mean:[1130]***blocks.5.4.bn1.running_var:[1130]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[1130, 1, 5, 5]***blocks.5.4.bn2.weight:[1130]***blocks.5.4.bn2.bias:[1130]***blocks.5.4.bn2.running_mean:[1130]***blocks.5.4.bn2.running_var:[1130]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[58, 1130, 1, 1]***blocks.5.4.se.conv_reduce.bias:[58]***blocks.5.4.se.conv_expand.weight:[1130, 58, 1, 1]***blocks.5.4.se.conv_expand.bias:[1130]***blocks.5.4.conv_pwl.weight:[232, 1130, 1, 1]***blocks.5.4.bn3.weight:[232]***blocks.5.4.bn3.bias:[232]***blocks.5.4.bn3.running_mean:[232]***blocks.5.4.bn3.running_var:[232]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.5.5.conv_pw.weight:[1266, 232, 1, 1]***blocks.5.5.bn1.weight:[1266]***blocks.5.5.bn1.bias:[1266]***blocks.5.5.bn1.running_mean:[1266]***blocks.5.5.bn1.running_var:[1266]***blocks.5.5.bn1.num_batches_tracked:[]***blocks.5.5.conv_dw.weight:[1266, 1, 5, 5]***blocks.5.5.bn2.weight:[1266]***blocks.5.5.bn2.bias:[1266]***blocks.5.5.bn2.running_mean:[1266]***blocks.5.5.bn2.running_var:[1266]***blocks.5.5.bn2.num_batches_tracked:[]***blocks.5.5.se.conv_reduce.weight:[58, 1266, 1, 1]***blocks.5.5.se.conv_reduce.bias:[58]***blocks.5.5.se.conv_expand.weight:[1266, 58, 1, 1]***blocks.5.5.se.conv_expand.bias:[1266]***blocks.5.5.conv_pwl.weight:[232, 1266, 1, 1]***blocks.5.5.bn3.weight:[232]***blocks.5.5.bn3.bias:[232]***blocks.5.5.bn3.running_mean:[232]***blocks.5.5.bn3.running_var:[232]***blocks.5.5.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1392, 232, 1, 1]***blocks.6.0.bn1.weight:[1392]***blocks.6.0.bn1.bias:[1392]***blocks.6.0.bn1.running_mean:[1392]***blocks.6.0.bn1.running_var:[1392]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1392, 1, 3, 3]***blocks.6.0.bn2.weight:[1392]***blocks.6.0.bn2.bias:[1392]***blocks.6.0.bn2.running_mean:[1392]***blocks.6.0.bn2.running_var:[1392]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[58, 1392, 1, 1]***blocks.6.0.se.conv_reduce.bias:[58]***blocks.6.0.se.conv_expand.weight:[1392, 58, 1, 1]***blocks.6.0.se.conv_expand.bias:[1392]***blocks.6.0.conv_pwl.weight:[384, 1392, 1, 1]***blocks.6.0.bn3.weight:[384]***blocks.6.0.bn3.bias:[384]***blocks.6.0.bn3.running_mean:[384]***blocks.6.0.bn3.running_var:[384]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[2301, 384, 1, 1]***blocks.6.1.bn1.weight:[2301]***blocks.6.1.bn1.bias:[2301]***blocks.6.1.bn1.running_mean:[2301]***blocks.6.1.bn1.running_var:[2301]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[2301, 1, 3, 3]***blocks.6.1.bn2.weight:[2301]***blocks.6.1.bn2.bias:[2301]***blocks.6.1.bn2.running_mean:[2301]***blocks.6.1.bn2.running_var:[2301]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[96, 2301, 1, 1]***blocks.6.1.se.conv_reduce.bias:[96]***blocks.6.1.se.conv_expand.weight:[2301, 96, 1, 1]***blocks.6.1.se.conv_expand.bias:[2301]***blocks.6.1.conv_pwl.weight:[384, 2301, 1, 1]***blocks.6.1.bn3.weight:[384]***blocks.6.1.bn3.bias:[384]***blocks.6.1.bn3.running_mean:[384]***blocks.6.1.bn3.running_var:[384]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1536, 384, 1, 
1]***bn2.weight:[1536]***bn2.bias:[1536]***bn2.running_mean:[1536]***bn2.running_var:[1536]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1536]***classifier.bias:[1000]
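The pruned-model txt files above store each checkpoint tensor as 'name:[d1, d2, ...]' with entries joined by '***' (empty brackets for scalars such as num_batches_tracked). timm consumes these files through its own pruned-model adaptation helpers; the sketch below is only a minimal illustration of the format, not the library's implementation, and the file path in the comment is hypothetical.

def parse_pruned_shapes(text: str) -> dict:
    # Split on the '***' separator and parse 'param_name:[d1, d2, ...]' entries.
    shapes = {}
    for entry in text.strip().split('***'):
        if not entry:
            continue
        name, shape_str = entry.split(':', 1)
        shape_str = shape_str.strip().strip('[]')
        # Scalar entries (e.g. num_batches_tracked) carry an empty shape '[]'.
        shapes[name] = tuple(int(d) for d in shape_str.split(',') if d.strip()) if shape_str else ()
    return shapes

# e.g. parse_pruned_shapes(open('efficientnet_b3_pruned.txt').read())['conv_head.weight'] -> (1536, 384, 1, 1)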
pytorch-image-models/timm/models/resnet.py ADDED
The diff for this file is too large to render. See raw diff
 
pytorch-image-models/timm/models/resnetv2.py ADDED
@@ -0,0 +1,911 @@
1
+ """Pre-Activation ResNet v2 with GroupNorm and Weight Standardization.
2
+
3
+ A PyTorch implementation of ResNetV2 adapted from the Google Big-Transfer (BiT) source code
4
+ at https://github.com/google-research/big_transfer to match timm interfaces. The BiT weights have
5
+ been included here as pretrained models from their original .NPZ checkpoints.
6
+
7
+ Additionally, supports non pre-activation bottleneck for use as a backbone for Vision Transformers (ViT) and
8
+ extra padding support to allow porting of official Hybrid ResNet pretrained weights from
9
+ https://github.com/google-research/vision_transformer
10
+
11
+ Thanks to the Google team for the above two repositories and associated papers:
12
+ * Big Transfer (BiT): General Visual Representation Learning - https://arxiv.org/abs/1912.11370
13
+ * An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale - https://arxiv.org/abs/2010.11929
14
+ * Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237
15
+
16
+ Original copyright of Google code below, modifications by Ross Wightman, Copyright 2020.
17
+ """
18
+ # Copyright 2020 Google LLC
19
+ #
20
+ # Licensed under the Apache License, Version 2.0 (the "License");
21
+ # you may not use this file except in compliance with the License.
22
+ # You may obtain a copy of the License at
23
+ #
24
+ # http://www.apache.org/licenses/LICENSE-2.0
25
+ #
26
+ # Unless required by applicable law or agreed to in writing, software
27
+ # distributed under the License is distributed on an "AS IS" BASIS,
28
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
29
+ # See the License for the specific language governing permissions and
30
+ # limitations under the License.
31
+
32
+ from collections import OrderedDict # pylint: disable=g-importing-member
33
+ from functools import partial
34
+ from typing import Optional
35
+
36
+ import torch
37
+ import torch.nn as nn
38
+
39
+ from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
40
+ from timm.layers import GroupNormAct, BatchNormAct2d, EvoNorm2dS0, FilterResponseNormTlu2d, ClassifierHead, \
41
+ DropPath, AvgPool2dSame, create_pool2d, StdConv2d, create_conv2d, get_act_layer, get_norm_act_layer, make_divisible
42
+ from ._builder import build_model_with_cfg
43
+ from ._manipulate import checkpoint_seq, named_apply, adapt_input_conv
44
+ from ._registry import generate_default_cfgs, register_model, register_model_deprecations
45
+
46
+ __all__ = ['ResNetV2'] # model_registry will add each entrypoint fn to this
47
+
48
+
49
+ class PreActBasic(nn.Module):
50
+ """ Pre-activation basic block (not in typical 'v2' implementations)
51
+ """
52
+
53
+ def __init__(
54
+ self,
55
+ in_chs,
56
+ out_chs=None,
57
+ bottle_ratio=1.0,
58
+ stride=1,
59
+ dilation=1,
60
+ first_dilation=None,
61
+ groups=1,
62
+ act_layer=None,
63
+ conv_layer=None,
64
+ norm_layer=None,
65
+ proj_layer=None,
66
+ drop_path_rate=0.,
67
+ ):
68
+ super().__init__()
69
+ first_dilation = first_dilation or dilation
70
+ conv_layer = conv_layer or StdConv2d
71
+ norm_layer = norm_layer or partial(GroupNormAct, num_groups=32)
72
+ out_chs = out_chs or in_chs
73
+ mid_chs = make_divisible(out_chs * bottle_ratio)
74
+
75
+ if proj_layer is not None and (stride != 1 or first_dilation != dilation or in_chs != out_chs):
76
+ self.downsample = proj_layer(
77
+ in_chs,
78
+ out_chs,
79
+ stride=stride,
80
+ dilation=dilation,
81
+ first_dilation=first_dilation,
82
+ preact=True,
83
+ conv_layer=conv_layer,
84
+ norm_layer=norm_layer,
85
+ )
86
+ else:
87
+ self.downsample = None
88
+
89
+ self.norm1 = norm_layer(in_chs)
90
+ self.conv1 = conv_layer(in_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups)
91
+ self.norm2 = norm_layer(mid_chs)
92
+ self.conv2 = conv_layer(mid_chs, out_chs, 3, dilation=dilation, groups=groups)
93
+ self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
94
+
95
+ def zero_init_last(self):
96
+ nn.init.zeros_(self.conv2.weight)  # basic block has no conv3; zero-init its final conv (conv2)
97
+
98
+ def forward(self, x):
99
+ x_preact = self.norm1(x)
100
+
101
+ # shortcut branch
102
+ shortcut = x
103
+ if self.downsample is not None:
104
+ shortcut = self.downsample(x_preact)
105
+
106
+ # residual branch
107
+ x = self.conv1(x_preact)
108
+ x = self.conv2(self.norm2(x))
109
+ x = self.drop_path(x)
110
+ return x + shortcut
111
+
112
+
113
+ class PreActBottleneck(nn.Module):
114
+ """Pre-activation (v2) bottleneck block.
115
+
116
+ Follows the implementation of "Identity Mappings in Deep Residual Networks":
117
+ https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua
118
+
119
+ Except it puts the stride on 3x3 conv when available.
120
+ """
121
+
122
+ def __init__(
123
+ self,
124
+ in_chs,
125
+ out_chs=None,
126
+ bottle_ratio=0.25,
127
+ stride=1,
128
+ dilation=1,
129
+ first_dilation=None,
130
+ groups=1,
131
+ act_layer=None,
132
+ conv_layer=None,
133
+ norm_layer=None,
134
+ proj_layer=None,
135
+ drop_path_rate=0.,
136
+ ):
137
+ super().__init__()
138
+ first_dilation = first_dilation or dilation
139
+ conv_layer = conv_layer or StdConv2d
140
+ norm_layer = norm_layer or partial(GroupNormAct, num_groups=32)
141
+ out_chs = out_chs or in_chs
142
+ mid_chs = make_divisible(out_chs * bottle_ratio)
143
+
144
+ if proj_layer is not None:
145
+ self.downsample = proj_layer(
146
+ in_chs,
147
+ out_chs,
148
+ stride=stride,
149
+ dilation=dilation,
150
+ first_dilation=first_dilation,
151
+ preact=True,
152
+ conv_layer=conv_layer,
153
+ norm_layer=norm_layer,
154
+ )
155
+ else:
156
+ self.downsample = None
157
+
158
+ self.norm1 = norm_layer(in_chs)
159
+ self.conv1 = conv_layer(in_chs, mid_chs, 1)
160
+ self.norm2 = norm_layer(mid_chs)
161
+ self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups)
162
+ self.norm3 = norm_layer(mid_chs)
163
+ self.conv3 = conv_layer(mid_chs, out_chs, 1)
164
+ self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
165
+
166
+ def zero_init_last(self):
167
+ nn.init.zeros_(self.conv3.weight)
168
+
169
+ def forward(self, x):
170
+ x_preact = self.norm1(x)
171
+
172
+ # shortcut branch
173
+ shortcut = x
174
+ if self.downsample is not None:
175
+ shortcut = self.downsample(x_preact)
176
+
177
+ # residual branch
178
+ x = self.conv1(x_preact)
179
+ x = self.conv2(self.norm2(x))
180
+ x = self.conv3(self.norm3(x))
181
+ x = self.drop_path(x)
182
+ return x + shortcut
183
+
184
+
185
+ class Bottleneck(nn.Module):
186
+ """Non Pre-activation bottleneck block, equiv to V1.5/V1b Bottleneck. Used for ViT.
187
+ """
188
+ def __init__(
189
+ self,
190
+ in_chs,
191
+ out_chs=None,
192
+ bottle_ratio=0.25,
193
+ stride=1,
194
+ dilation=1,
195
+ first_dilation=None,
196
+ groups=1,
197
+ act_layer=None,
198
+ conv_layer=None,
199
+ norm_layer=None,
200
+ proj_layer=None,
201
+ drop_path_rate=0.,
202
+ ):
203
+ super().__init__()
204
+ first_dilation = first_dilation or dilation
205
+ act_layer = act_layer or nn.ReLU
206
+ conv_layer = conv_layer or StdConv2d
207
+ norm_layer = norm_layer or partial(GroupNormAct, num_groups=32)
208
+ out_chs = out_chs or in_chs
209
+ mid_chs = make_divisible(out_chs * bottle_ratio)
210
+
211
+ if proj_layer is not None:
212
+ self.downsample = proj_layer(
213
+ in_chs,
214
+ out_chs,
215
+ stride=stride,
216
+ dilation=dilation,
217
+ preact=False,
218
+ conv_layer=conv_layer,
219
+ norm_layer=norm_layer,
220
+ )
221
+ else:
222
+ self.downsample = None
223
+
224
+ self.conv1 = conv_layer(in_chs, mid_chs, 1)
225
+ self.norm1 = norm_layer(mid_chs)
226
+ self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups)
227
+ self.norm2 = norm_layer(mid_chs)
228
+ self.conv3 = conv_layer(mid_chs, out_chs, 1)
229
+ self.norm3 = norm_layer(out_chs, apply_act=False)
230
+ self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
231
+ self.act3 = act_layer(inplace=True)
232
+
233
+ def zero_init_last(self):
234
+ if getattr(self.norm3, 'weight', None) is not None:
235
+ nn.init.zeros_(self.norm3.weight)
236
+
237
+ def forward(self, x):
238
+ # shortcut branch
239
+ shortcut = x
240
+ if self.downsample is not None:
241
+ shortcut = self.downsample(x)
242
+
243
+ # residual
244
+ x = self.conv1(x)
245
+ x = self.norm1(x)
246
+ x = self.conv2(x)
247
+ x = self.norm2(x)
248
+ x = self.conv3(x)
249
+ x = self.norm3(x)
250
+ x = self.drop_path(x)
251
+ x = self.act3(x + shortcut)
252
+ return x
253
+
254
+
255
+ class DownsampleConv(nn.Module):
256
+ def __init__(
257
+ self,
258
+ in_chs,
259
+ out_chs,
260
+ stride=1,
261
+ dilation=1,
262
+ first_dilation=None,
263
+ preact=True,
264
+ conv_layer=None,
265
+ norm_layer=None,
266
+ ):
267
+ super(DownsampleConv, self).__init__()
268
+ self.conv = conv_layer(in_chs, out_chs, 1, stride=stride)
269
+ self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False)
270
+
271
+ def forward(self, x):
272
+ return self.norm(self.conv(x))
273
+
274
+
275
+ class DownsampleAvg(nn.Module):
276
+ def __init__(
277
+ self,
278
+ in_chs,
279
+ out_chs,
280
+ stride=1,
281
+ dilation=1,
282
+ first_dilation=None,
283
+ preact=True,
284
+ conv_layer=None,
285
+ norm_layer=None,
286
+ ):
287
+ """ AvgPool Downsampling as in 'D' ResNet variants. This is not in RegNet space but I might experiment."""
288
+ super(DownsampleAvg, self).__init__()
289
+ avg_stride = stride if dilation == 1 else 1
290
+ if stride > 1 or dilation > 1:
291
+ avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
292
+ self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
293
+ else:
294
+ self.pool = nn.Identity()
295
+ self.conv = conv_layer(in_chs, out_chs, 1, stride=1)
296
+ self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False)
297
+
298
+ def forward(self, x):
299
+ return self.norm(self.conv(self.pool(x)))
300
+
301
+
302
+ class ResNetStage(nn.Module):
303
+ """ResNet Stage."""
304
+ def __init__(
305
+ self,
306
+ in_chs,
307
+ out_chs,
308
+ stride,
309
+ dilation,
310
+ depth,
311
+ bottle_ratio=0.25,
312
+ groups=1,
313
+ avg_down=False,
314
+ block_dpr=None,
315
+ block_fn=PreActBottleneck,
316
+ act_layer=None,
317
+ conv_layer=None,
318
+ norm_layer=None,
319
+ **block_kwargs,
320
+ ):
321
+ super(ResNetStage, self).__init__()
322
+ first_dilation = 1 if dilation in (1, 2) else 2
323
+ layer_kwargs = dict(act_layer=act_layer, conv_layer=conv_layer, norm_layer=norm_layer)
324
+ proj_layer = DownsampleAvg if avg_down else DownsampleConv
325
+ prev_chs = in_chs
326
+ self.blocks = nn.Sequential()
327
+ for block_idx in range(depth):
328
+ drop_path_rate = block_dpr[block_idx] if block_dpr else 0.
329
+ stride = stride if block_idx == 0 else 1
330
+ self.blocks.add_module(str(block_idx), block_fn(
331
+ prev_chs,
332
+ out_chs,
333
+ stride=stride,
334
+ dilation=dilation,
335
+ bottle_ratio=bottle_ratio,
336
+ groups=groups,
337
+ first_dilation=first_dilation,
338
+ proj_layer=proj_layer,
339
+ drop_path_rate=drop_path_rate,
340
+ **layer_kwargs,
341
+ **block_kwargs,
342
+ ))
343
+ prev_chs = out_chs
344
+ first_dilation = dilation
345
+ proj_layer = None
346
+
347
+ def forward(self, x):
348
+ x = self.blocks(x)
349
+ return x
350
+
351
+
352
+ def is_stem_deep(stem_type):
353
+ return any([s in stem_type for s in ('deep', 'tiered')])
354
+
355
+
356
+ def create_resnetv2_stem(
357
+ in_chs,
358
+ out_chs=64,
359
+ stem_type='',
360
+ preact=True,
361
+ conv_layer=StdConv2d,
362
+ norm_layer=partial(GroupNormAct, num_groups=32),
363
+ ):
364
+ stem = OrderedDict()
365
+ assert stem_type in ('', 'fixed', 'same', 'deep', 'deep_fixed', 'deep_same', 'tiered')
366
+
367
+ # NOTE conv padding mode can be changed by overriding the conv_layer def
368
+ if is_stem_deep(stem_type):
369
+ # A 3 deep 3x3 conv stack as in ResNet V1D models
370
+ if 'tiered' in stem_type:
371
+ stem_chs = (3 * out_chs // 8, out_chs // 2) # 'T' resnets in resnet.py
372
+ else:
373
+ stem_chs = (out_chs // 2, out_chs // 2) # 'D' ResNets
374
+ stem['conv1'] = conv_layer(in_chs, stem_chs[0], kernel_size=3, stride=2)
375
+ stem['norm1'] = norm_layer(stem_chs[0])
376
+ stem['conv2'] = conv_layer(stem_chs[0], stem_chs[1], kernel_size=3, stride=1)
377
+ stem['norm2'] = norm_layer(stem_chs[1])
378
+ stem['conv3'] = conv_layer(stem_chs[1], out_chs, kernel_size=3, stride=1)
379
+ if not preact:
380
+ stem['norm3'] = norm_layer(out_chs)
381
+ else:
382
+ # The usual 7x7 stem conv
383
+ stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2)
384
+ if not preact:
385
+ stem['norm'] = norm_layer(out_chs)
386
+
387
+ if 'fixed' in stem_type:
388
+ # 'fixed' SAME padding approximation that is used in BiT models
389
+ stem['pad'] = nn.ConstantPad2d(1, 0.)
390
+ stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=0)
391
+ elif 'same' in stem_type:
392
+ # full, input size based 'SAME' padding, used in ViT Hybrid model
393
+ stem['pool'] = create_pool2d('max', kernel_size=3, stride=2, padding='same')
394
+ else:
395
+ # the usual PyTorch symmetric padding
396
+ stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
397
+
398
+ return nn.Sequential(stem)
399
+
400
+
401
+ class ResNetV2(nn.Module):
402
+ """Implementation of Pre-activation (v2) ResNet mode.
403
+ """
404
+
405
+ def __init__(
406
+ self,
407
+ layers,
408
+ channels=(256, 512, 1024, 2048),
409
+ num_classes=1000,
410
+ in_chans=3,
411
+ global_pool='avg',
412
+ output_stride=32,
413
+ width_factor=1,
414
+ stem_chs=64,
415
+ stem_type='',
416
+ avg_down=False,
417
+ preact=True,
418
+ basic=False,
419
+ bottle_ratio=0.25,
420
+ act_layer=nn.ReLU,
421
+ norm_layer=partial(GroupNormAct, num_groups=32),
422
+ conv_layer=StdConv2d,
423
+ drop_rate=0.,
424
+ drop_path_rate=0.,
425
+ zero_init_last=False,
426
+ ):
427
+ """
428
+ Args:
429
+ layers (List[int]) : number of layers in each block
430
+ channels (List[int]) : number of channels in each block
431
+ num_classes (int): number of classification classes (default 1000)
432
+ in_chans (int): number of input (color) channels. (default 3)
433
+ global_pool (str): Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax' (default 'avg')
434
+ output_stride (int): output stride of the network, 32, 16, or 8. (default 32)
435
+ width_factor (int): channel (width) multiplication factor
436
+ stem_chs (int): stem width (default: 64)
437
+ stem_type (str): stem type (default: '' == 7x7)
438
+ avg_down (bool): average pooling in residual downsampling (default: False)
439
+ preact (bool): pre-activation (default: True)
440
+ act_layer (Union[str, nn.Module]): activation layer
441
+ norm_layer (Union[str, nn.Module]): normalization layer
442
+ conv_layer (nn.Module): convolution module
443
+ drop_rate: classifier dropout rate (default: 0.)
444
+ drop_path_rate: stochastic depth rate (default: 0.)
445
+ zero_init_last: zero-init last weight in residual path (default: False)
446
+ """
447
+ super().__init__()
448
+ self.num_classes = num_classes
449
+ self.drop_rate = drop_rate
450
+ wf = width_factor
451
+ norm_layer = get_norm_act_layer(norm_layer, act_layer=act_layer)
452
+ act_layer = get_act_layer(act_layer)
453
+
454
+ self.feature_info = []
455
+ stem_chs = make_divisible(stem_chs * wf)
456
+ self.stem = create_resnetv2_stem(
457
+ in_chans,
458
+ stem_chs,
459
+ stem_type,
460
+ preact,
461
+ conv_layer=conv_layer,
462
+ norm_layer=norm_layer,
463
+ )
464
+ stem_feat = ('stem.conv3' if is_stem_deep(stem_type) else 'stem.conv') if preact else 'stem.norm'
465
+ self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=stem_feat))
466
+
467
+ prev_chs = stem_chs
468
+ curr_stride = 4
469
+ dilation = 1
470
+ block_dprs = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)]
471
+ if preact:
472
+ block_fn = PreActBasic if basic else PreActBottleneck
473
+ else:
474
+ assert not basic
475
+ block_fn = Bottleneck
476
+ self.stages = nn.Sequential()
477
+ for stage_idx, (d, c, bdpr) in enumerate(zip(layers, channels, block_dprs)):
478
+ out_chs = make_divisible(c * wf)
479
+ stride = 1 if stage_idx == 0 else 2
480
+ if curr_stride >= output_stride:
481
+ dilation *= stride
482
+ stride = 1
483
+ stage = ResNetStage(
484
+ prev_chs,
485
+ out_chs,
486
+ stride=stride,
487
+ dilation=dilation,
488
+ depth=d,
489
+ bottle_ratio=bottle_ratio,
490
+ avg_down=avg_down,
491
+ act_layer=act_layer,
492
+ conv_layer=conv_layer,
493
+ norm_layer=norm_layer,
494
+ block_dpr=bdpr,
495
+ block_fn=block_fn,
496
+ )
497
+ prev_chs = out_chs
498
+ curr_stride *= stride
499
+ self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{stage_idx}')]
500
+ self.stages.add_module(str(stage_idx), stage)
501
+
502
+ self.num_features = self.head_hidden_size = prev_chs
503
+ self.norm = norm_layer(self.num_features) if preact else nn.Identity()
504
+ self.head = ClassifierHead(
505
+ self.num_features,
506
+ num_classes,
507
+ pool_type=global_pool,
508
+ drop_rate=self.drop_rate,
509
+ use_conv=True,
510
+ )
511
+
512
+ self.init_weights(zero_init_last=zero_init_last)
513
+ self.grad_checkpointing = False
514
+
515
+ @torch.jit.ignore
516
+ def init_weights(self, zero_init_last=True):
517
+ named_apply(partial(_init_weights, zero_init_last=zero_init_last), self)
518
+
519
+ @torch.jit.ignore()
520
+ def load_pretrained(self, checkpoint_path, prefix='resnet/'):
521
+ _load_weights(self, checkpoint_path, prefix)
522
+
523
+ @torch.jit.ignore
524
+ def group_matcher(self, coarse=False):
525
+ matcher = dict(
526
+ stem=r'^stem',
527
+ blocks=r'^stages\.(\d+)' if coarse else [
528
+ (r'^stages\.(\d+)\.blocks\.(\d+)', None),
529
+ (r'^norm', (99999,))
530
+ ]
531
+ )
532
+ return matcher
533
+
534
+ @torch.jit.ignore
535
+ def set_grad_checkpointing(self, enable=True):
536
+ self.grad_checkpointing = enable
537
+
538
+ @torch.jit.ignore
539
+ def get_classifier(self) -> nn.Module:
540
+ return self.head.fc
541
+
542
+ def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
543
+ self.num_classes = num_classes
544
+ self.head.reset(num_classes, global_pool)
545
+
546
+ def forward_features(self, x):
547
+ x = self.stem(x)
548
+ if self.grad_checkpointing and not torch.jit.is_scripting():
549
+ x = checkpoint_seq(self.stages, x, flatten=True)
550
+ else:
551
+ x = self.stages(x)
552
+ x = self.norm(x)
553
+ return x
554
+
555
+ def forward_head(self, x, pre_logits: bool = False):
556
+ return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
557
+
558
+ def forward(self, x):
559
+ x = self.forward_features(x)
560
+ x = self.forward_head(x)
561
+ return x
562
+
563
+
564
+ def _init_weights(module: nn.Module, name: str = '', zero_init_last=True):
565
+ if isinstance(module, nn.Linear) or ('head.fc' in name and isinstance(module, nn.Conv2d)):
566
+ nn.init.normal_(module.weight, mean=0.0, std=0.01)
567
+ nn.init.zeros_(module.bias)
568
+ elif isinstance(module, nn.Conv2d):
569
+ nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
570
+ if module.bias is not None:
571
+ nn.init.zeros_(module.bias)
572
+ elif isinstance(module, (nn.BatchNorm2d, nn.LayerNorm, nn.GroupNorm)):
573
+ nn.init.ones_(module.weight)
574
+ nn.init.zeros_(module.bias)
575
+ elif zero_init_last and hasattr(module, 'zero_init_last'):
576
+ module.zero_init_last()
577
+
578
+
579
+ @torch.no_grad()
580
+ def _load_weights(model: nn.Module, checkpoint_path: str, prefix: str = 'resnet/'):
581
+ import numpy as np
582
+
583
+ def t2p(conv_weights):
584
+ """Possibly convert HWIO to OIHW."""
585
+ if conv_weights.ndim == 4:
586
+ conv_weights = conv_weights.transpose([3, 2, 0, 1])
587
+ return torch.from_numpy(conv_weights)
588
+
589
+ weights = np.load(checkpoint_path)
590
+ stem_conv_w = adapt_input_conv(
591
+ model.stem.conv.weight.shape[1], t2p(weights[f'{prefix}root_block/standardized_conv2d/kernel']))
592
+ model.stem.conv.weight.copy_(stem_conv_w)
593
+ model.norm.weight.copy_(t2p(weights[f'{prefix}group_norm/gamma']))
594
+ model.norm.bias.copy_(t2p(weights[f'{prefix}group_norm/beta']))
595
+ if isinstance(getattr(model.head, 'fc', None), nn.Conv2d) and \
596
+ model.head.fc.weight.shape[0] == weights[f'{prefix}head/conv2d/kernel'].shape[-1]:
597
+ model.head.fc.weight.copy_(t2p(weights[f'{prefix}head/conv2d/kernel']))
598
+ model.head.fc.bias.copy_(t2p(weights[f'{prefix}head/conv2d/bias']))
599
+ for i, (sname, stage) in enumerate(model.stages.named_children()):
600
+ for j, (bname, block) in enumerate(stage.blocks.named_children()):
601
+ cname = 'standardized_conv2d'
602
+ block_prefix = f'{prefix}block{i + 1}/unit{j + 1:02d}/'
603
+ block.conv1.weight.copy_(t2p(weights[f'{block_prefix}a/{cname}/kernel']))
604
+ block.conv2.weight.copy_(t2p(weights[f'{block_prefix}b/{cname}/kernel']))
605
+ block.conv3.weight.copy_(t2p(weights[f'{block_prefix}c/{cname}/kernel']))
606
+ block.norm1.weight.copy_(t2p(weights[f'{block_prefix}a/group_norm/gamma']))
607
+ block.norm2.weight.copy_(t2p(weights[f'{block_prefix}b/group_norm/gamma']))
608
+ block.norm3.weight.copy_(t2p(weights[f'{block_prefix}c/group_norm/gamma']))
609
+ block.norm1.bias.copy_(t2p(weights[f'{block_prefix}a/group_norm/beta']))
610
+ block.norm2.bias.copy_(t2p(weights[f'{block_prefix}b/group_norm/beta']))
611
+ block.norm3.bias.copy_(t2p(weights[f'{block_prefix}c/group_norm/beta']))
612
+ if block.downsample is not None:
613
+ w = weights[f'{block_prefix}a/proj/{cname}/kernel']
614
+ block.downsample.conv.weight.copy_(t2p(w))
615
+
616
+
617
+ def _create_resnetv2(variant, pretrained=False, **kwargs):
618
+ feature_cfg = dict(flatten_sequential=True)
619
+ return build_model_with_cfg(
620
+ ResNetV2, variant, pretrained,
621
+ feature_cfg=feature_cfg,
622
+ **kwargs,
623
+ )
624
+
625
+
626
+ def _create_resnetv2_bit(variant, pretrained=False, **kwargs):
627
+ return _create_resnetv2(
628
+ variant,
629
+ pretrained=pretrained,
630
+ stem_type='fixed',
631
+ conv_layer=partial(StdConv2d, eps=1e-8),
632
+ **kwargs,
633
+ )
634
+
635
+
636
+ def _cfg(url='', **kwargs):
637
+ return {
638
+ 'url': url,
639
+ 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
640
+ 'crop_pct': 0.875, 'interpolation': 'bilinear',
641
+ 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
642
+ 'first_conv': 'stem.conv', 'classifier': 'head.fc',
643
+ **kwargs
644
+ }
645
+
646
+
647
+ default_cfgs = generate_default_cfgs({
648
+ # Paper: Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237
649
+ 'resnetv2_50x1_bit.goog_distilled_in1k': _cfg(
650
+ hf_hub_id='timm/',
651
+ interpolation='bicubic', custom_load=True),
652
+ 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k': _cfg(
653
+ hf_hub_id='timm/',
654
+ interpolation='bicubic', custom_load=True),
655
+ 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k_384': _cfg(
656
+ hf_hub_id='timm/',
657
+ input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, interpolation='bicubic', custom_load=True),
658
+
659
+ # pretrained on imagenet21k, finetuned on imagenet1k
660
+ 'resnetv2_50x1_bit.goog_in21k_ft_in1k': _cfg(
661
+ hf_hub_id='timm/',
662
+ input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True),
663
+ 'resnetv2_50x3_bit.goog_in21k_ft_in1k': _cfg(
664
+ hf_hub_id='timm/',
665
+ input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True),
666
+ 'resnetv2_101x1_bit.goog_in21k_ft_in1k': _cfg(
667
+ hf_hub_id='timm/',
668
+ input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True),
669
+ 'resnetv2_101x3_bit.goog_in21k_ft_in1k': _cfg(
670
+ hf_hub_id='timm/',
671
+ input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True),
672
+ 'resnetv2_152x2_bit.goog_in21k_ft_in1k': _cfg(
673
+ hf_hub_id='timm/',
674
+ input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True),
675
+ 'resnetv2_152x4_bit.goog_in21k_ft_in1k': _cfg(
676
+ hf_hub_id='timm/',
677
+ input_size=(3, 480, 480), pool_size=(15, 15), crop_pct=1.0, custom_load=True), # only one at 480x480?
678
+
679
+ # trained on imagenet-21k
680
+ 'resnetv2_50x1_bit.goog_in21k': _cfg(
681
+ hf_hub_id='timm/',
682
+ num_classes=21843, custom_load=True),
683
+ 'resnetv2_50x3_bit.goog_in21k': _cfg(
684
+ hf_hub_id='timm/',
685
+ num_classes=21843, custom_load=True),
686
+ 'resnetv2_101x1_bit.goog_in21k': _cfg(
687
+ hf_hub_id='timm/',
688
+ num_classes=21843, custom_load=True),
689
+ 'resnetv2_101x3_bit.goog_in21k': _cfg(
690
+ hf_hub_id='timm/',
691
+ num_classes=21843, custom_load=True),
692
+ 'resnetv2_152x2_bit.goog_in21k': _cfg(
693
+ hf_hub_id='timm/',
694
+ num_classes=21843, custom_load=True),
695
+ 'resnetv2_152x4_bit.goog_in21k': _cfg(
696
+ hf_hub_id='timm/',
697
+ num_classes=21843, custom_load=True),
698
+
699
+ 'resnetv2_18.ra4_e3600_r224_in1k': _cfg(
700
+ hf_hub_id='timm/',
701
+ interpolation='bicubic', crop_pct=0.9, test_input_size=(3, 288, 288), test_crop_pct=1.0),
702
+ 'resnetv2_18d.ra4_e3600_r224_in1k': _cfg(
703
+ hf_hub_id='timm/',
704
+ interpolation='bicubic', crop_pct=0.9, test_input_size=(3, 288, 288), test_crop_pct=1.0,
705
+ first_conv='stem.conv1'),
706
+ 'resnetv2_34.ra4_e3600_r224_in1k': _cfg(
707
+ hf_hub_id='timm/',
708
+ interpolation='bicubic', crop_pct=0.9, test_input_size=(3, 288, 288), test_crop_pct=1.0),
709
+ 'resnetv2_34d.ra4_e3600_r224_in1k': _cfg(
710
+ hf_hub_id='timm/',
711
+ interpolation='bicubic', crop_pct=0.9, test_input_size=(3, 288, 288), test_crop_pct=1.0,
712
+ first_conv='stem.conv1'),
713
+ 'resnetv2_34d.ra4_e3600_r384_in1k': _cfg(
714
+ hf_hub_id='timm/',
715
+ crop_pct=1.0, input_size=(3, 384, 384), pool_size=(12, 12), test_input_size=(3, 448, 448),
716
+ interpolation='bicubic', first_conv='stem.conv1'),
717
+ 'resnetv2_50.a1h_in1k': _cfg(
718
+ hf_hub_id='timm/',
719
+ interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
720
+ 'resnetv2_50d.untrained': _cfg(
721
+ interpolation='bicubic', first_conv='stem.conv1'),
722
+ 'resnetv2_50t.untrained': _cfg(
723
+ interpolation='bicubic', first_conv='stem.conv1'),
724
+ 'resnetv2_101.a1h_in1k': _cfg(
725
+ hf_hub_id='timm/',
726
+ interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
727
+ 'resnetv2_101d.untrained': _cfg(
728
+ interpolation='bicubic', first_conv='stem.conv1'),
729
+ 'resnetv2_152.untrained': _cfg(
730
+ interpolation='bicubic'),
731
+ 'resnetv2_152d.untrained': _cfg(
732
+ interpolation='bicubic', first_conv='stem.conv1'),
733
+
734
+ 'resnetv2_50d_gn.ah_in1k': _cfg(
735
+ hf_hub_id='timm/',
736
+ interpolation='bicubic', first_conv='stem.conv1',
737
+ crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
738
+ 'resnetv2_50d_evos.ah_in1k': _cfg(
739
+ hf_hub_id='timm/',
740
+ interpolation='bicubic', first_conv='stem.conv1',
741
+ crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
742
+ 'resnetv2_50d_frn.untrained': _cfg(
743
+ interpolation='bicubic', first_conv='stem.conv1'),
744
+ })
745
+
746
+
747
+ @register_model
748
+ def resnetv2_50x1_bit(pretrained=False, **kwargs) -> ResNetV2:
749
+ return _create_resnetv2_bit(
750
+ 'resnetv2_50x1_bit', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=1, **kwargs)
751
+
752
+
753
+ @register_model
754
+ def resnetv2_50x3_bit(pretrained=False, **kwargs) -> ResNetV2:
755
+ return _create_resnetv2_bit(
756
+ 'resnetv2_50x3_bit', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=3, **kwargs)
757
+
758
+
759
+ @register_model
760
+ def resnetv2_101x1_bit(pretrained=False, **kwargs) -> ResNetV2:
761
+ return _create_resnetv2_bit(
762
+ 'resnetv2_101x1_bit', pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=1, **kwargs)
763
+
764
+
765
+ @register_model
766
+ def resnetv2_101x3_bit(pretrained=False, **kwargs) -> ResNetV2:
767
+ return _create_resnetv2_bit(
768
+ 'resnetv2_101x3_bit', pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=3, **kwargs)
769
+
770
+
771
+ @register_model
772
+ def resnetv2_152x2_bit(pretrained=False, **kwargs) -> ResNetV2:
773
+ return _create_resnetv2_bit(
774
+ 'resnetv2_152x2_bit', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=2, **kwargs)
775
+
776
+
777
+ @register_model
778
+ def resnetv2_152x4_bit(pretrained=False, **kwargs) -> ResNetV2:
779
+ return _create_resnetv2_bit(
780
+ 'resnetv2_152x4_bit', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=4, **kwargs)
781
+
782
+
783
+ @register_model
784
+ def resnetv2_18(pretrained=False, **kwargs) -> ResNetV2:
785
+ model_args = dict(
786
+ layers=[2, 2, 2, 2], channels=(64, 128, 256, 512), basic=True, bottle_ratio=1.0,
787
+ conv_layer=create_conv2d, norm_layer=BatchNormAct2d
788
+ )
789
+ return _create_resnetv2('resnetv2_18', pretrained=pretrained, **dict(model_args, **kwargs))
790
+
791
+
792
+ @register_model
793
+ def resnetv2_18d(pretrained=False, **kwargs) -> ResNetV2:
794
+ model_args = dict(
795
+ layers=[2, 2, 2, 2], channels=(64, 128, 256, 512), basic=True, bottle_ratio=1.0,
796
+ conv_layer=create_conv2d, norm_layer=BatchNormAct2d, stem_type='deep', avg_down=True
797
+ )
798
+ return _create_resnetv2('resnetv2_18d', pretrained=pretrained, **dict(model_args, **kwargs))
799
+
800
+
801
+ @register_model
802
+ def resnetv2_34(pretrained=False, **kwargs) -> ResNetV2:
803
+ model_args = dict(
804
+ layers=(3, 4, 6, 3), channels=(64, 128, 256, 512), basic=True, bottle_ratio=1.0,
805
+ conv_layer=create_conv2d, norm_layer=BatchNormAct2d
806
+ )
807
+ return _create_resnetv2('resnetv2_34', pretrained=pretrained, **dict(model_args, **kwargs))
808
+
809
+
810
+ @register_model
811
+ def resnetv2_34d(pretrained=False, **kwargs) -> ResNetV2:
812
+ model_args = dict(
813
+ layers=(3, 4, 6, 3), channels=(64, 128, 256, 512), basic=True, bottle_ratio=1.0,
814
+ conv_layer=create_conv2d, norm_layer=BatchNormAct2d, stem_type='deep', avg_down=True
815
+ )
816
+ return _create_resnetv2('resnetv2_34d', pretrained=pretrained, **dict(model_args, **kwargs))
817
+
818
+
819
+ @register_model
820
+ def resnetv2_50(pretrained=False, **kwargs) -> ResNetV2:
821
+ model_args = dict(layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d)
822
+ return _create_resnetv2('resnetv2_50', pretrained=pretrained, **dict(model_args, **kwargs))
823
+
824
+
825
+ @register_model
826
+ def resnetv2_50d(pretrained=False, **kwargs) -> ResNetV2:
827
+ model_args = dict(
828
+ layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d,
829
+ stem_type='deep', avg_down=True)
830
+ return _create_resnetv2('resnetv2_50d', pretrained=pretrained, **dict(model_args, **kwargs))
831
+
832
+
833
+ @register_model
834
+ def resnetv2_50t(pretrained=False, **kwargs) -> ResNetV2:
835
+ model_args = dict(
836
+ layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d,
837
+ stem_type='tiered', avg_down=True)
838
+ return _create_resnetv2('resnetv2_50t', pretrained=pretrained, **dict(model_args, **kwargs))
839
+
840
+
841
+ @register_model
842
+ def resnetv2_101(pretrained=False, **kwargs) -> ResNetV2:
843
+ model_args = dict(layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d)
844
+ return _create_resnetv2('resnetv2_101', pretrained=pretrained, **dict(model_args, **kwargs))
845
+
846
+
847
+ @register_model
848
+ def resnetv2_101d(pretrained=False, **kwargs) -> ResNetV2:
849
+ model_args = dict(
850
+ layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d,
851
+ stem_type='deep', avg_down=True)
852
+ return _create_resnetv2('resnetv2_101d', pretrained=pretrained, **dict(model_args, **kwargs))
853
+
854
+
855
+ @register_model
856
+ def resnetv2_152(pretrained=False, **kwargs) -> ResNetV2:
857
+ model_args = dict(layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d)
858
+ return _create_resnetv2('resnetv2_152', pretrained=pretrained, **dict(model_args, **kwargs))
859
+
860
+
861
+ @register_model
862
+ def resnetv2_152d(pretrained=False, **kwargs) -> ResNetV2:
863
+ model_args = dict(
864
+ layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d,
865
+ stem_type='deep', avg_down=True)
866
+ return _create_resnetv2('resnetv2_152d', pretrained=pretrained, **dict(model_args, **kwargs))
867
+
868
+
869
+ # Experimental configs (may change / be removed)
870
+
871
+ @register_model
872
+ def resnetv2_50d_gn(pretrained=False, **kwargs) -> ResNetV2:
873
+ model_args = dict(
874
+ layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=GroupNormAct,
875
+ stem_type='deep', avg_down=True)
876
+ return _create_resnetv2('resnetv2_50d_gn', pretrained=pretrained, **dict(model_args, **kwargs))
877
+
878
+
879
+ @register_model
880
+ def resnetv2_50d_evos(pretrained=False, **kwargs) -> ResNetV2:
881
+ model_args = dict(
882
+ layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=EvoNorm2dS0,
883
+ stem_type='deep', avg_down=True)
884
+ return _create_resnetv2('resnetv2_50d_evos', pretrained=pretrained, **dict(model_args, **kwargs))
885
+
886
+
887
+ @register_model
888
+ def resnetv2_50d_frn(pretrained=False, **kwargs) -> ResNetV2:
889
+ model_args = dict(
890
+ layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=FilterResponseNormTlu2d,
891
+ stem_type='deep', avg_down=True)
892
+ return _create_resnetv2('resnetv2_50d_frn', pretrained=pretrained, **dict(model_args, **kwargs))
893
+
894
+
895
+ register_model_deprecations(__name__, {
896
+ 'resnetv2_50x1_bitm': 'resnetv2_50x1_bit.goog_in21k_ft_in1k',
897
+ 'resnetv2_50x3_bitm': 'resnetv2_50x3_bit.goog_in21k_ft_in1k',
898
+ 'resnetv2_101x1_bitm': 'resnetv2_101x1_bit.goog_in21k_ft_in1k',
899
+ 'resnetv2_101x3_bitm': 'resnetv2_101x3_bit.goog_in21k_ft_in1k',
900
+ 'resnetv2_152x2_bitm': 'resnetv2_152x2_bit.goog_in21k_ft_in1k',
901
+ 'resnetv2_152x4_bitm': 'resnetv2_152x4_bit.goog_in21k_ft_in1k',
902
+ 'resnetv2_50x1_bitm_in21k': 'resnetv2_50x1_bit.goog_in21k',
903
+ 'resnetv2_50x3_bitm_in21k': 'resnetv2_50x3_bit.goog_in21k',
904
+ 'resnetv2_101x1_bitm_in21k': 'resnetv2_101x1_bit.goog_in21k',
905
+ 'resnetv2_101x3_bitm_in21k': 'resnetv2_101x3_bit.goog_in21k',
906
+ 'resnetv2_152x2_bitm_in21k': 'resnetv2_152x2_bit.goog_in21k',
907
+ 'resnetv2_152x4_bitm_in21k': 'resnetv2_152x4_bit.goog_in21k',
908
+ 'resnetv2_50x1_bit_distilled': 'resnetv2_50x1_bit.goog_distilled_in1k',
909
+ 'resnetv2_152x2_bit_teacher': 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k',
910
+ 'resnetv2_152x2_bit_teacher_384': 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k_384',
911
+ })
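The registered entrypoints and default_cfgs above are normally used through timm's factory rather than by instantiating ResNetV2 directly. A minimal usage sketch, assuming a recent timm release in which these entrypoint and pretrained-tag names exist:

import torch
import timm

# Classification model; the BiT pretrained tags resolve via the hf_hub cfgs in default_cfgs.
model = timm.create_model('resnetv2_50x1_bit.goog_in21k_ft_in1k', pretrained=False)
model.eval()
x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(x)  # shape (1, 1000)

# Feature extraction relies on the feature_info entries built in ResNetV2.__init__
# (stem at stride 2 plus the four stages at strides 4/8/16/32).
backbone = timm.create_model('resnetv2_50', pretrained=False, features_only=True)
with torch.no_grad():
    feature_maps = backbone(x)  # list of 5 tensors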
pytorch-image-models/timm/models/rexnet.py ADDED
@@ -0,0 +1,358 @@
1
+ """ ReXNet
2
+
3
+ A PyTorch impl of `ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network` -
4
+ https://arxiv.org/abs/2007.00992
5
+
6
+ Adapted from original impl at https://github.com/clovaai/rexnet
7
+ Copyright (c) 2020-present NAVER Corp. MIT license
8
+
9
+ Changes for timm, feature extraction, and rounded channel variant hacked together by Ross Wightman
10
+ Copyright 2020 Ross Wightman
11
+ """
12
+
13
+ from functools import partial
14
+ from math import ceil
15
+ from typing import Optional
16
+
17
+ import torch
18
+ import torch.nn as nn
19
+
20
+ from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
21
+ from timm.layers import ClassifierHead, create_act_layer, ConvNormAct, DropPath, make_divisible, SEModule
22
+ from ._builder import build_model_with_cfg
23
+ from ._efficientnet_builder import efficientnet_init_weights
24
+ from ._manipulate import checkpoint_seq
25
+ from ._registry import generate_default_cfgs, register_model
26
+
27
+ __all__ = ['RexNet'] # model_registry will add each entrypoint fn to this
28
+
29
+
30
+ SEWithNorm = partial(SEModule, norm_layer=nn.BatchNorm2d)
31
+
32
+
33
+ class LinearBottleneck(nn.Module):
34
+ def __init__(
35
+ self,
36
+ in_chs,
37
+ out_chs,
38
+ stride,
39
+ dilation=(1, 1),
40
+ exp_ratio=1.0,
41
+ se_ratio=0.,
42
+ ch_div=1,
43
+ act_layer='swish',
44
+ dw_act_layer='relu6',
45
+ drop_path=None,
46
+ ):
47
+ super(LinearBottleneck, self).__init__()
48
+ self.use_shortcut = stride == 1 and dilation[0] == dilation[1] and in_chs <= out_chs
49
+ self.in_channels = in_chs
50
+ self.out_channels = out_chs
51
+
52
+ if exp_ratio != 1.:
53
+ dw_chs = make_divisible(round(in_chs * exp_ratio), divisor=ch_div)
54
+ self.conv_exp = ConvNormAct(in_chs, dw_chs, act_layer=act_layer)
55
+ else:
56
+ dw_chs = in_chs
57
+ self.conv_exp = None
58
+
59
+ self.conv_dw = ConvNormAct(
60
+ dw_chs,
61
+ dw_chs,
62
+ kernel_size=3,
63
+ stride=stride,
64
+ dilation=dilation[0],
65
+ groups=dw_chs,
66
+ apply_act=False,
67
+ )
68
+ if se_ratio > 0:
69
+ self.se = SEWithNorm(dw_chs, rd_channels=make_divisible(int(dw_chs * se_ratio), ch_div))
70
+ else:
71
+ self.se = None
72
+ self.act_dw = create_act_layer(dw_act_layer)
73
+
74
+ self.conv_pwl = ConvNormAct(dw_chs, out_chs, 1, apply_act=False)
75
+ self.drop_path = drop_path
76
+
77
+ def feat_channels(self, exp=False):
78
+ return self.conv_dw.out_channels if exp else self.out_channels
79
+
80
+ def forward(self, x):
81
+ shortcut = x
82
+ if self.conv_exp is not None:
83
+ x = self.conv_exp(x)
84
+ x = self.conv_dw(x)
85
+ if self.se is not None:
86
+ x = self.se(x)
87
+ x = self.act_dw(x)
88
+ x = self.conv_pwl(x)
89
+ if self.use_shortcut:
90
+ if self.drop_path is not None:
91
+ x = self.drop_path(x)
92
+ x = torch.cat([x[:, 0:self.in_channels] + shortcut, x[:, self.in_channels:]], dim=1)
93
+ return x
94
+
95
+
96
+ def _block_cfg(
97
+ width_mult=1.0,
98
+ depth_mult=1.0,
99
+ initial_chs=16,
100
+ final_chs=180,
101
+ se_ratio=0.,
102
+ ch_div=1,
103
+ ):
104
+ layers = [1, 2, 2, 3, 3, 5]
105
+ strides = [1, 2, 2, 2, 1, 2]
106
+ layers = [ceil(element * depth_mult) for element in layers]
107
+ strides = sum([[element] + [1] * (layers[idx] - 1) for idx, element in enumerate(strides)], [])
108
+ exp_ratios = [1] * layers[0] + [6] * sum(layers[1:])
109
+ depth = sum(layers[:]) * 3
110
+ base_chs = initial_chs / width_mult if width_mult < 1.0 else initial_chs
111
+
112
+ # The following channel configuration is a simple instance to make each layer become an expand layer.
113
+ out_chs_list = []
114
+ for i in range(depth // 3):
115
+ out_chs_list.append(make_divisible(round(base_chs * width_mult), divisor=ch_div))
116
+ base_chs += final_chs / (depth // 3 * 1.0)
117
+
118
+ se_ratios = [0.] * (layers[0] + layers[1]) + [se_ratio] * sum(layers[2:])
119
+
120
+ return list(zip(out_chs_list, exp_ratios, strides, se_ratios))
121
+
122
+
123
+ def _build_blocks(
124
+ block_cfg,
125
+ prev_chs,
126
+ width_mult,
127
+ ch_div=1,
128
+ output_stride=32,
129
+ act_layer='swish',
130
+ dw_act_layer='relu6',
131
+ drop_path_rate=0.,
132
+ ):
133
+ feat_chs = [prev_chs]
134
+ feature_info = []
135
+ curr_stride = 2
136
+ dilation = 1
137
+ features = []
138
+ num_blocks = len(block_cfg)
139
+ for block_idx, (chs, exp_ratio, stride, se_ratio) in enumerate(block_cfg):
140
+ next_dilation = dilation
141
+ if stride > 1:
142
+ fname = 'stem' if block_idx == 0 else f'features.{block_idx - 1}'
143
+ feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=fname)]
144
+ if curr_stride >= output_stride:
145
+ next_dilation = dilation * stride
146
+ stride = 1
147
+ block_dpr = drop_path_rate * block_idx / (num_blocks - 1) # stochastic depth linear decay rule
148
+ drop_path = DropPath(block_dpr) if block_dpr > 0. else None
149
+ features.append(LinearBottleneck(
150
+ in_chs=prev_chs,
151
+ out_chs=chs,
152
+ exp_ratio=exp_ratio,
153
+ stride=stride,
154
+ dilation=(dilation, next_dilation),
155
+ se_ratio=se_ratio,
156
+ ch_div=ch_div,
157
+ act_layer=act_layer,
158
+ dw_act_layer=dw_act_layer,
159
+ drop_path=drop_path,
160
+ ))
161
+ curr_stride *= stride
162
+ dilation = next_dilation
163
+ prev_chs = chs
164
+ feat_chs += [features[-1].feat_channels()]
165
+ pen_chs = make_divisible(1280 * width_mult, divisor=ch_div)
166
+ feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=f'features.{len(features) - 1}')]
167
+ features.append(ConvNormAct(prev_chs, pen_chs, act_layer=act_layer))
168
+ return features, feature_info
169
+
170
+
171
+ class RexNet(nn.Module):
172
+ def __init__(
173
+ self,
174
+ in_chans=3,
175
+ num_classes=1000,
176
+ global_pool='avg',
177
+ output_stride=32,
178
+ initial_chs=16,
179
+ final_chs=180,
180
+ width_mult=1.0,
181
+ depth_mult=1.0,
182
+ se_ratio=1/12.,
183
+ ch_div=1,
184
+ act_layer='swish',
185
+ dw_act_layer='relu6',
186
+ drop_rate=0.2,
187
+ drop_path_rate=0.,
188
+ ):
189
+ super(RexNet, self).__init__()
190
+ self.num_classes = num_classes
191
+ self.drop_rate = drop_rate
192
+ self.grad_checkpointing = False
193
+
194
+ assert output_stride in (32, 16, 8)
195
+ stem_base_chs = 32 / width_mult if width_mult < 1.0 else 32
196
+ stem_chs = make_divisible(round(stem_base_chs * width_mult), divisor=ch_div)
197
+ self.stem = ConvNormAct(in_chans, stem_chs, 3, stride=2, act_layer=act_layer)
198
+
199
+ block_cfg = _block_cfg(width_mult, depth_mult, initial_chs, final_chs, se_ratio, ch_div)
200
+ features, self.feature_info = _build_blocks(
201
+ block_cfg,
202
+ stem_chs,
203
+ width_mult,
204
+ ch_div,
205
+ output_stride,
206
+ act_layer,
207
+ dw_act_layer,
208
+ drop_path_rate,
209
+ )
210
+ self.num_features = self.head_hidden_size = features[-1].out_channels
211
+ self.features = nn.Sequential(*features)
212
+
213
+ self.head = ClassifierHead(self.num_features, num_classes, global_pool, drop_rate)
214
+
215
+ efficientnet_init_weights(self)
216
+
217
+ @torch.jit.ignore
218
+ def group_matcher(self, coarse=False):
219
+ matcher = dict(
220
+ stem=r'^stem',
221
+ blocks=r'^features\.(\d+)',
222
+ )
223
+ return matcher
224
+
225
+ @torch.jit.ignore
226
+ def set_grad_checkpointing(self, enable=True):
227
+ self.grad_checkpointing = enable
228
+
229
+ @torch.jit.ignore
230
+ def get_classifier(self) -> nn.Module:
231
+ return self.head.fc
232
+
233
+ def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
234
+ self.num_classes = num_classes
235
+ self.head.reset(num_classes, global_pool)
236
+
237
+ def forward_features(self, x):
238
+ x = self.stem(x)
239
+ if self.grad_checkpointing and not torch.jit.is_scripting():
240
+ x = checkpoint_seq(self.features, x, flatten=True)
241
+ else:
242
+ x = self.features(x)
243
+ return x
244
+
245
+ def forward_head(self, x, pre_logits: bool = False):
246
+ return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
247
+
248
+ def forward(self, x):
249
+ x = self.forward_features(x)
250
+ x = self.forward_head(x)
251
+ return x
252
+
253
+
254
+ def _create_rexnet(variant, pretrained, **kwargs):
255
+ feature_cfg = dict(flatten_sequential=True)
256
+ return build_model_with_cfg(
257
+ RexNet,
258
+ variant,
259
+ pretrained,
260
+ feature_cfg=feature_cfg,
261
+ **kwargs,
262
+ )
263
+
264
+
265
+ def _cfg(url='', **kwargs):
266
+ return {
267
+ 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
268
+ 'crop_pct': 0.875, 'interpolation': 'bicubic',
269
+ 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
270
+ 'first_conv': 'stem.conv', 'classifier': 'head.fc',
271
+ 'license': 'mit', **kwargs
272
+ }
273
+
274
+
275
+ default_cfgs = generate_default_cfgs({
276
+ 'rexnet_100.nav_in1k': _cfg(hf_hub_id='timm/'),
277
+ 'rexnet_130.nav_in1k': _cfg(hf_hub_id='timm/'),
278
+ 'rexnet_150.nav_in1k': _cfg(hf_hub_id='timm/'),
279
+ 'rexnet_200.nav_in1k': _cfg(hf_hub_id='timm/'),
280
+ 'rexnet_300.nav_in1k': _cfg(hf_hub_id='timm/'),
281
+ 'rexnetr_100.untrained': _cfg(),
282
+ 'rexnetr_130.untrained': _cfg(),
283
+ 'rexnetr_150.untrained': _cfg(),
284
+ 'rexnetr_200.sw_in12k_ft_in1k': _cfg(
285
+ hf_hub_id='timm/',
286
+ crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'),
287
+ 'rexnetr_300.sw_in12k_ft_in1k': _cfg(
288
+ hf_hub_id='timm/',
289
+ crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'),
290
+ 'rexnetr_200.sw_in12k': _cfg(
291
+ hf_hub_id='timm/',
292
+ num_classes=11821,
293
+ crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'),
294
+ 'rexnetr_300.sw_in12k': _cfg(
295
+ hf_hub_id='timm/',
296
+ num_classes=11821,
297
+ crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'),
298
+ })
299
+
300
+
301
+ @register_model
302
+ def rexnet_100(pretrained=False, **kwargs) -> RexNet:
303
+ """ReXNet V1 1.0x"""
304
+ return _create_rexnet('rexnet_100', pretrained, **kwargs)
305
+
306
+
307
+ @register_model
308
+ def rexnet_130(pretrained=False, **kwargs) -> RexNet:
309
+ """ReXNet V1 1.3x"""
310
+ return _create_rexnet('rexnet_130', pretrained, width_mult=1.3, **kwargs)
311
+
312
+
313
+ @register_model
314
+ def rexnet_150(pretrained=False, **kwargs) -> RexNet:
315
+ """ReXNet V1 1.5x"""
316
+ return _create_rexnet('rexnet_150', pretrained, width_mult=1.5, **kwargs)
317
+
318
+
319
+ @register_model
320
+ def rexnet_200(pretrained=False, **kwargs) -> RexNet:
321
+ """ReXNet V1 2.0x"""
322
+ return _create_rexnet('rexnet_200', pretrained, width_mult=2.0, **kwargs)
323
+
324
+
325
+ @register_model
326
+ def rexnet_300(pretrained=False, **kwargs) -> RexNet:
327
+ """ReXNet V1 3.0x"""
328
+ return _create_rexnet('rexnet_300', pretrained, width_mult=3.0, **kwargs)
329
+
330
+
331
+ @register_model
332
+ def rexnetr_100(pretrained=False, **kwargs) -> RexNet:
333
+ """ReXNet V1 1.0x w/ rounded (mod 8) channels"""
334
+ return _create_rexnet('rexnetr_100', pretrained, ch_div=8, **kwargs)
335
+
336
+
337
+ @register_model
338
+ def rexnetr_130(pretrained=False, **kwargs) -> RexNet:
339
+ """ReXNet V1 1.3x w/ rounded (mod 8) channels"""
340
+ return _create_rexnet('rexnetr_130', pretrained, width_mult=1.3, ch_div=8, **kwargs)
341
+
342
+
343
+ @register_model
344
+ def rexnetr_150(pretrained=False, **kwargs) -> RexNet:
345
+ """ReXNet V1 1.5x w/ rounded (mod 8) channels"""
346
+ return _create_rexnet('rexnetr_150', pretrained, width_mult=1.5, ch_div=8, **kwargs)
347
+
348
+
349
+ @register_model
350
+ def rexnetr_200(pretrained=False, **kwargs) -> RexNet:
351
+ """ReXNet V1 2.0x w/ rounded (mod 8) channels"""
352
+ return _create_rexnet('rexnetr_200', pretrained, width_mult=2.0, ch_div=8, **kwargs)
353
+
354
+
355
+ @register_model
356
+ def rexnetr_300(pretrained=False, **kwargs) -> RexNet:
357
+ """ReXNet V1 3.0x w/ rounded (mod 16) channels"""
358
+ return _create_rexnet('rexnetr_300', pretrained, width_mult=3.0, ch_div=16, **kwargs)
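The per-block widths used by all of the entrypoints above come from _block_cfg's linear channel schedule. The standalone sketch below (not part of timm's API) reproduces that schedule for the default 1.0x configuration, assuming ch_div=1 so that make_divisible reduces to plain rounding; width_mult simply scales the same base schedule before rounding.

from math import ceil

def rexnet_block_channels(width_mult=1.0, depth_mult=1.0, initial_chs=16, final_chs=180):
    # Per-stage block counts scale with depth_mult, then the schedule is flattened per block.
    layers = [ceil(n * depth_mult) for n in [1, 2, 2, 3, 3, 5]]
    n_blocks = sum(layers)
    base_chs = initial_chs / width_mult if width_mult < 1.0 else initial_chs
    out_chs = []
    for _ in range(n_blocks):
        out_chs.append(int(round(base_chs * width_mult)))  # ch_div=1 -> plain rounding
        base_chs += final_chs / n_blocks  # linear growth toward initial_chs + final_chs
    return out_chs

# e.g. rexnet_block_channels()[:4] -> [16, 27, 38, 50]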
pytorch-image-models/timm/models/sknet.py ADDED
@@ -0,0 +1,240 @@
1
+ """ Selective Kernel Networks (ResNet base)
2
+
3
+ Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586)
4
+
5
+ This was inspired by reading 'Compounding the Performance Improvements...' (https://arxiv.org/abs/2001.06268)
6
+ and a streamlined impl at https://github.com/clovaai/assembled-cnn but I ended up building something closer
7
+ to the original paper with some modifications of my own to better balance param count vs accuracy.
8
+
9
+ Hacked together by / Copyright 2020 Ross Wightman
10
+ """
11
+ import math
12
+
13
+ from torch import nn as nn
14
+
15
+ from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
16
+ from timm.layers import SelectiveKernel, ConvNormAct, create_attn
17
+ from ._builder import build_model_with_cfg
18
+ from ._registry import register_model, generate_default_cfgs
19
+ from .resnet import ResNet
20
+
21
+
22
+ class SelectiveKernelBasic(nn.Module):
23
+ expansion = 1
24
+
25
+ def __init__(
26
+ self,
27
+ inplanes,
28
+ planes,
29
+ stride=1,
30
+ downsample=None,
31
+ cardinality=1,
32
+ base_width=64,
33
+ sk_kwargs=None,
34
+ reduce_first=1,
35
+ dilation=1,
36
+ first_dilation=None,
37
+ act_layer=nn.ReLU,
38
+ norm_layer=nn.BatchNorm2d,
39
+ attn_layer=None,
40
+ aa_layer=None,
41
+ drop_block=None,
42
+ drop_path=None,
43
+ ):
44
+ super(SelectiveKernelBasic, self).__init__()
45
+
46
+ sk_kwargs = sk_kwargs or {}
47
+ conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer)
48
+ assert cardinality == 1, 'BasicBlock only supports cardinality of 1'
49
+ assert base_width == 64, 'BasicBlock does not support changing base width'
50
+ first_planes = planes // reduce_first
51
+ outplanes = planes * self.expansion
52
+ first_dilation = first_dilation or dilation
53
+
54
+ self.conv1 = SelectiveKernel(
55
+ inplanes, first_planes, stride=stride, dilation=first_dilation,
56
+ aa_layer=aa_layer, drop_layer=drop_block, **conv_kwargs, **sk_kwargs)
57
+ self.conv2 = ConvNormAct(
58
+ first_planes, outplanes, kernel_size=3, dilation=dilation, apply_act=False, **conv_kwargs)
59
+ self.se = create_attn(attn_layer, outplanes)
60
+ self.act = act_layer(inplace=True)
61
+ self.downsample = downsample
62
+ self.drop_path = drop_path
63
+
64
+ def zero_init_last(self):
65
+ if getattr(self.conv2.bn, 'weight', None) is not None:
66
+ nn.init.zeros_(self.conv2.bn.weight)
67
+
68
+ def forward(self, x):
69
+ shortcut = x
70
+ x = self.conv1(x)
71
+ x = self.conv2(x)
72
+ if self.se is not None:
73
+ x = self.se(x)
74
+ if self.drop_path is not None:
75
+ x = self.drop_path(x)
76
+ if self.downsample is not None:
77
+ shortcut = self.downsample(shortcut)
78
+ x += shortcut
79
+ x = self.act(x)
80
+ return x
81
+
82
+
83
+ class SelectiveKernelBottleneck(nn.Module):
84
+ expansion = 4
85
+
86
+ def __init__(
87
+ self,
88
+ inplanes,
89
+ planes,
90
+ stride=1,
91
+ downsample=None,
92
+ cardinality=1,
93
+ base_width=64,
94
+ sk_kwargs=None,
95
+ reduce_first=1,
96
+ dilation=1,
97
+ first_dilation=None,
98
+ act_layer=nn.ReLU,
99
+ norm_layer=nn.BatchNorm2d,
100
+ attn_layer=None,
101
+ aa_layer=None,
102
+ drop_block=None,
103
+ drop_path=None,
104
+ ):
105
+ super(SelectiveKernelBottleneck, self).__init__()
106
+
107
+ sk_kwargs = sk_kwargs or {}
108
+ conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer)
109
+ width = int(math.floor(planes * (base_width / 64)) * cardinality)
110
+ first_planes = width // reduce_first
111
+ outplanes = planes * self.expansion
112
+ first_dilation = first_dilation or dilation
113
+
114
+ self.conv1 = ConvNormAct(inplanes, first_planes, kernel_size=1, **conv_kwargs)
115
+ self.conv2 = SelectiveKernel(
116
+ first_planes, width, stride=stride, dilation=first_dilation, groups=cardinality,
117
+ aa_layer=aa_layer, drop_layer=drop_block, **conv_kwargs, **sk_kwargs)
118
+ self.conv3 = ConvNormAct(width, outplanes, kernel_size=1, apply_act=False, **conv_kwargs)
119
+ self.se = create_attn(attn_layer, outplanes)
120
+ self.act = act_layer(inplace=True)
121
+ self.downsample = downsample
122
+ self.drop_path = drop_path
123
+
124
+ def zero_init_last(self):
125
+ if getattr(self.conv3.bn, 'weight', None) is not None:
126
+ nn.init.zeros_(self.conv3.bn.weight)
127
+
128
+ def forward(self, x):
129
+ shortcut = x
130
+ x = self.conv1(x)
131
+ x = self.conv2(x)
132
+ x = self.conv3(x)
133
+ if self.se is not None:
134
+ x = self.se(x)
135
+ if self.drop_path is not None:
136
+ x = self.drop_path(x)
137
+ if self.downsample is not None:
138
+ shortcut = self.downsample(shortcut)
139
+ x += shortcut
140
+ x = self.act(x)
141
+ return x
142
+
143
+
144
+ def _create_skresnet(variant, pretrained=False, **kwargs):
145
+ return build_model_with_cfg(
146
+ ResNet,
147
+ variant,
148
+ pretrained,
149
+ **kwargs,
150
+ )
151
+
152
+
153
+ def _cfg(url='', **kwargs):
154
+ return {
155
+ 'url': url,
156
+ 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
157
+ 'crop_pct': 0.875, 'interpolation': 'bicubic',
158
+ 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
159
+ 'first_conv': 'conv1', 'classifier': 'fc',
160
+ **kwargs
161
+ }
162
+
163
+
164
+ default_cfgs = generate_default_cfgs({
165
+ 'skresnet18.ra_in1k': _cfg(hf_hub_id='timm/'),
166
+ 'skresnet34.ra_in1k': _cfg(hf_hub_id='timm/'),
167
+ 'skresnet50.untrained': _cfg(),
168
+ 'skresnet50d.untrained': _cfg(
169
+ first_conv='conv1.0'),
170
+ 'skresnext50_32x4d.ra_in1k': _cfg(hf_hub_id='timm/'),
171
+ })
172
+
173
+
174
+ @register_model
175
+ def skresnet18(pretrained=False, **kwargs) -> ResNet:
176
+ """Constructs a Selective Kernel ResNet-18 model.
177
+
178
+ Different from the configs in the Selective Kernel paper or "Compounding the Performance Improvements...", this
179
+ variation splits the input channels to the selective convolutions to keep param count down.
180
+ """
181
+ sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True)
182
+ model_args = dict(
183
+ block=SelectiveKernelBasic, layers=[2, 2, 2, 2], block_args=dict(sk_kwargs=sk_kwargs),
184
+ zero_init_last=False, **kwargs)
185
+ return _create_skresnet('skresnet18', pretrained, **model_args)
186
+
187
+
188
+ @register_model
189
+ def skresnet34(pretrained=False, **kwargs) -> ResNet:
190
+ """Constructs a Selective Kernel ResNet-34 model.
191
+
192
+ Different from the configs in the Selective Kernel paper or "Compounding the Performance Improvements...", this
193
+ variation splits the input channels to the selective convolutions to keep param count down.
194
+ """
195
+ sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True)
196
+ model_args = dict(
197
+ block=SelectiveKernelBasic, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs),
198
+ zero_init_last=False, **kwargs)
199
+ return _create_skresnet('skresnet34', pretrained, **model_args)
200
+
201
+
202
+ @register_model
203
+ def skresnet50(pretrained=False, **kwargs) -> ResNet:
204
+ """Constructs a Select Kernel ResNet-50 model.
205
+
206
+ Different from the configs in the Selective Kernel paper or "Compounding the Performance Improvements...", this
207
+ variation splits the input channels to the selective convolutions to keep param count down.
208
+ """
209
+ sk_kwargs = dict(split_input=True)
210
+ model_args = dict(
211
+ block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs),
212
+ zero_init_last=False, **kwargs)
213
+ return _create_skresnet('skresnet50', pretrained, **model_args)
214
+
215
+
216
+ @register_model
217
+ def skresnet50d(pretrained=False, **kwargs) -> ResNet:
218
+ """Constructs a Select Kernel ResNet-50-D model.
219
+
220
+ Different from the configs in the Selective Kernel paper or "Compounding the Performance Improvements...", this
221
+ variation splits the input channels to the selective convolutions to keep param count down.
222
+ """
223
+ sk_kwargs = dict(split_input=True)
224
+ model_args = dict(
225
+ block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True,
226
+ block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs)
227
+ return _create_skresnet('skresnet50d', pretrained, **model_args)
228
+
229
+
230
+ @register_model
231
+ def skresnext50_32x4d(pretrained=False, **kwargs) -> ResNet:
232
+ """Constructs a Select Kernel ResNeXt50-32x4d model. This should be equivalent to
233
+ the SKNet-50 model in the Selective Kernel paper.
234
+ """
235
+ sk_kwargs = dict(rd_ratio=1/16, rd_divisor=32, split_input=False)
236
+ model_args = dict(
237
+ block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4,
238
+ block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs)
239
+ return _create_skresnet('skresnext50_32x4d', pretrained, **model_args)
240
+
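# A minimal, standalone usage sketch for the SK-ResNet entrypoints above (illustrative; assumes
# timm and torch are installed, and that pretrained weights are only published for the variants
# with an hf_hub_id in default_cfgs, e.g. skresnet18/34 '.ra_in1k'):
import timm
import torch

model = timm.create_model('skresnet34', pretrained=False)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))  # -> shape (1, 1000)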
pytorch-image-models/timm/models/swin_transformer_v2.py ADDED
@@ -0,0 +1,1088 @@
1
+ """ Swin Transformer V2
2
+ A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution`
3
+ - https://arxiv.org/abs/2111.09883
4
+
5
+ Code/weights from https://github.com/microsoft/Swin-Transformer, original copyright/license info below
6
+
7
+ Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman
8
+ """
9
+ # --------------------------------------------------------
10
+ # Swin Transformer V2
11
+ # Copyright (c) 2022 Microsoft
12
+ # Licensed under The MIT License [see LICENSE for details]
13
+ # Written by Ze Liu
14
+ # --------------------------------------------------------
15
+ import math
16
+ from typing import Callable, List, Optional, Tuple, Union
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+ import torch.nn.functional as F
21
+ import torch.utils.checkpoint as checkpoint
22
+
23
+ from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
24
+ from timm.layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_, _assert, ClassifierHead,\
25
+ resample_patch_embed, ndgrid, get_act_layer, LayerType
26
+ from ._builder import build_model_with_cfg
27
+ from ._features import feature_take_indices
28
+ from ._features_fx import register_notrace_function
29
+ from ._registry import generate_default_cfgs, register_model, register_model_deprecations
30
+
31
+ __all__ = ['SwinTransformerV2'] # model_registry will add each entrypoint fn to this
32
+
33
+ _int_or_tuple_2_t = Union[int, Tuple[int, int]]
34
+
35
+
36
+ def window_partition(x: torch.Tensor, window_size: Tuple[int, int]) -> torch.Tensor:
37
+ """
38
+ Args:
39
+ x: (B, H, W, C)
40
+ window_size (Tuple[int, int]): window size
41
+
42
+ Returns:
43
+ windows: (num_windows*B, window_size, window_size, C)
44
+ """
45
+ B, H, W, C = x.shape
46
+ x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
47
+ windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
48
+ return windows
49
+
50
+
51
+ @register_notrace_function # reason: int argument is a Proxy
52
+ def window_reverse(windows: torch.Tensor, window_size: Tuple[int, int], img_size: Tuple[int, int]) -> torch.Tensor:
53
+ """
54
+ Args:
55
+ windows: (num_windows * B, window_size[0], window_size[1], C)
56
+ window_size (Tuple[int, int]): Window size
57
+ img_size (Tuple[int, int]): Image size
58
+
59
+ Returns:
60
+ x: (B, H, W, C)
61
+ """
62
+ H, W = img_size
63
+ C = windows.shape[-1]
64
+ x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C)
65
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C)
66
+ return x
67
+
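# A small shape sketch of the two helpers above (illustrative; assumes H and W are already
# multiples of the window size and that the helpers are importable from this module):
import torch

x = torch.randn(2, 16, 16, 96)                        # (B, H, W, C)
windows = window_partition(x, (8, 8))                 # -> (2 * 2 * 2, 8, 8, 96) = (8, 8, 8, 96)
restored = window_reverse(windows, (8, 8), (16, 16))  # back to (2, 16, 16, 96)
assert torch.equal(x, restored)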
68
+
69
+ class WindowAttention(nn.Module):
70
+ r""" Window based multi-head self attention (W-MSA) module with relative position bias.
71
+ It supports both of shifted and non-shifted window.
72
+
73
+ Args:
74
+ dim (int): Number of input channels.
75
+ window_size (tuple[int]): The height and width of the window.
76
+ num_heads (int): Number of attention heads.
77
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
78
+ attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
79
+ proj_drop (float, optional): Dropout ratio of output. Default: 0.0
80
+ pretrained_window_size (tuple[int]): The height and width of the window in pre-training.
81
+ """
82
+
83
+ def __init__(
84
+ self,
85
+ dim: int,
86
+ window_size: Tuple[int, int],
87
+ num_heads: int,
88
+ qkv_bias: bool = True,
89
+ qkv_bias_separate: bool = False,
90
+ attn_drop: float = 0.,
91
+ proj_drop: float = 0.,
92
+ pretrained_window_size: Tuple[int, int] = (0, 0),
93
+ ) -> None:
94
+ super().__init__()
95
+ self.dim = dim
96
+ self.window_size = window_size # Wh, Ww
97
+ self.pretrained_window_size = to_2tuple(pretrained_window_size)
98
+ self.num_heads = num_heads
99
+ self.qkv_bias_separate = qkv_bias_separate
100
+
101
+ self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
102
+
103
+ # mlp to generate continuous relative position bias
104
+ self.cpb_mlp = nn.Sequential(
105
+ nn.Linear(2, 512, bias=True),
106
+ nn.ReLU(inplace=True),
107
+ nn.Linear(512, num_heads, bias=False)
108
+ )
109
+
110
+ self.qkv = nn.Linear(dim, dim * 3, bias=False)
111
+ if qkv_bias:
112
+ self.q_bias = nn.Parameter(torch.zeros(dim))
113
+ self.register_buffer('k_bias', torch.zeros(dim), persistent=False)
114
+ self.v_bias = nn.Parameter(torch.zeros(dim))
115
+ else:
116
+ self.q_bias = None
117
+ self.k_bias = None
118
+ self.v_bias = None
119
+ self.attn_drop = nn.Dropout(attn_drop)
120
+ self.proj = nn.Linear(dim, dim)
121
+ self.proj_drop = nn.Dropout(proj_drop)
122
+ self.softmax = nn.Softmax(dim=-1)
123
+
124
+ self._make_pair_wise_relative_positions()
125
+
126
+ def _make_pair_wise_relative_positions(self):
127
+ # get relative_coords_table
128
+ relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0]).to(torch.float32)
129
+ relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1]).to(torch.float32)
130
+ relative_coords_table = torch.stack(ndgrid(relative_coords_h, relative_coords_w))
131
+ relative_coords_table = relative_coords_table.permute(1, 2, 0).contiguous().unsqueeze(0) # 1, 2*Wh-1, 2*Ww-1, 2
132
+ if self.pretrained_window_size[0] > 0:
133
+ relative_coords_table[:, :, :, 0] /= (self.pretrained_window_size[0] - 1)
134
+ relative_coords_table[:, :, :, 1] /= (self.pretrained_window_size[1] - 1)
135
+ else:
136
+ relative_coords_table[:, :, :, 0] /= (self.window_size[0] - 1)
137
+ relative_coords_table[:, :, :, 1] /= (self.window_size[1] - 1)
138
+ relative_coords_table *= 8 # normalize to -8, 8
139
+ relative_coords_table = torch.sign(relative_coords_table) * torch.log2(
140
+ torch.abs(relative_coords_table) + 1.0) / math.log2(8)
141
+ self.register_buffer("relative_coords_table", relative_coords_table, persistent=False)
142
+
143
+ # get pair-wise relative position index for each token inside the window
144
+ coords_h = torch.arange(self.window_size[0])
145
+ coords_w = torch.arange(self.window_size[1])
146
+ coords = torch.stack(ndgrid(coords_h, coords_w)) # 2, Wh, Ww
147
+ coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
148
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
149
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
150
+ relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
151
+ relative_coords[:, :, 1] += self.window_size[1] - 1
152
+ relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
153
+ relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
154
+ self.register_buffer("relative_position_index", relative_position_index, persistent=False)
155
+
156
+ def set_window_size(self, window_size: Tuple[int, int]) -> None:
157
+ """Update window size & interpolate position embeddings
158
+ Args:
159
+ window_size (Tuple[int, int]): New window size
160
+ """
161
+ window_size = to_2tuple(window_size)
162
+ if window_size != self.window_size:
163
+ self.window_size = window_size
164
+ self._make_pair_wise_relative_positions()
165
+
166
+ def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
167
+ """
168
+ Args:
169
+ x: input features with shape of (num_windows*B, N, C)
170
+ mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
171
+ """
172
+ B_, N, C = x.shape
173
+
174
+ if self.q_bias is None:
175
+ qkv = self.qkv(x)
176
+ else:
177
+ qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias))
178
+ if self.qkv_bias_separate:
179
+ qkv = self.qkv(x)
180
+ qkv += qkv_bias
181
+ else:
182
+ qkv = F.linear(x, weight=self.qkv.weight, bias=qkv_bias)
183
+ qkv = qkv.reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
184
+ q, k, v = qkv.unbind(0)
185
+
186
+ # cosine attention
187
+ attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1))
188
+ logit_scale = torch.clamp(self.logit_scale, max=math.log(1. / 0.01)).exp()
189
+ attn = attn * logit_scale
190
+
191
+ relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads)
192
+ relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view(
193
+ self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
194
+ relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
195
+ relative_position_bias = 16 * torch.sigmoid(relative_position_bias)
196
+ attn = attn + relative_position_bias.unsqueeze(0)
197
+
198
+ if mask is not None:
199
+ num_win = mask.shape[0]
200
+ attn = attn.view(-1, num_win, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
201
+ attn = attn.view(-1, self.num_heads, N, N)
202
+ attn = self.softmax(attn)
203
+ else:
204
+ attn = self.softmax(attn)
205
+
206
+ attn = self.attn_drop(attn)
207
+
208
+ x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
209
+ x = self.proj(x)
210
+ x = self.proj_drop(x)
211
+ return x
212
+
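# A minimal sketch of the attention module above on one batch of windows (illustrative; the
# sequence length N must equal window_size[0] * window_size[1] so the continuous relative
# position bias table lines up):
import torch

attn = WindowAttention(dim=96, window_size=(8, 8), num_heads=3)
x = torch.randn(4, 64, 96)   # (num_windows * B, N = 8*8, C = dim)
out = attn(x)                # cosine attention scaled by exp(logit_scale) + 16*sigmoid(cpb_mlp(...)) bias
print(out.shape)             # torch.Size([4, 64, 96])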
213
+
214
+ class SwinTransformerV2Block(nn.Module):
215
+ """ Swin Transformer Block.
216
+ """
217
+
218
+ def __init__(
219
+ self,
220
+ dim: int,
221
+ input_resolution: _int_or_tuple_2_t,
222
+ num_heads: int,
223
+ window_size: _int_or_tuple_2_t = 7,
224
+ shift_size: _int_or_tuple_2_t = 0,
225
+ always_partition: bool = False,
226
+ dynamic_mask: bool = False,
227
+ mlp_ratio: float = 4.,
228
+ qkv_bias: bool = True,
229
+ proj_drop: float = 0.,
230
+ attn_drop: float = 0.,
231
+ drop_path: float = 0.,
232
+ act_layer: LayerType = "gelu",
233
+ norm_layer: nn.Module = nn.LayerNorm,
234
+ pretrained_window_size: _int_or_tuple_2_t = 0,
235
+ ):
236
+ """
237
+ Args:
238
+ dim: Number of input channels.
239
+ input_resolution: Input resolution.
240
+ num_heads: Number of attention heads.
241
+ window_size: Window size.
242
+ shift_size: Shift size for SW-MSA.
243
+ always_partition: Always partition into full windows and shift
244
+ mlp_ratio: Ratio of mlp hidden dim to embedding dim.
245
+ qkv_bias: If True, add a learnable bias to query, key, value.
246
+ proj_drop: Dropout rate.
247
+ attn_drop: Attention dropout rate.
248
+ drop_path: Stochastic depth rate.
249
+ act_layer: Activation layer.
250
+ norm_layer: Normalization layer.
251
+ pretrained_window_size: Window size in pretraining.
252
+ """
253
+ super().__init__()
254
+ self.dim = dim
255
+ self.input_resolution = to_2tuple(input_resolution)
256
+ self.num_heads = num_heads
257
+ self.target_shift_size = to_2tuple(shift_size) # store for later resize
258
+ self.always_partition = always_partition
259
+ self.dynamic_mask = dynamic_mask
260
+ self.window_size, self.shift_size = self._calc_window_shift(window_size, shift_size)
261
+ self.window_area = self.window_size[0] * self.window_size[1]
262
+ self.mlp_ratio = mlp_ratio
263
+ act_layer = get_act_layer(act_layer)
264
+
265
+ self.attn = WindowAttention(
266
+ dim,
267
+ window_size=to_2tuple(self.window_size),
268
+ num_heads=num_heads,
269
+ qkv_bias=qkv_bias,
270
+ attn_drop=attn_drop,
271
+ proj_drop=proj_drop,
272
+ pretrained_window_size=to_2tuple(pretrained_window_size),
273
+ )
274
+ self.norm1 = norm_layer(dim)
275
+ self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
276
+
277
+ self.mlp = Mlp(
278
+ in_features=dim,
279
+ hidden_features=int(dim * mlp_ratio),
280
+ act_layer=act_layer,
281
+ drop=proj_drop,
282
+ )
283
+ self.norm2 = norm_layer(dim)
284
+ self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
285
+
286
+ self.register_buffer(
287
+ "attn_mask",
288
+ None if self.dynamic_mask else self.get_attn_mask(),
289
+ persistent=False,
290
+ )
291
+
292
+ def get_attn_mask(self, x: Optional[torch.Tensor] = None) -> Optional[torch.Tensor]:
293
+ if any(self.shift_size):
294
+ # calculate attention mask for SW-MSA
295
+ if x is None:
296
+ img_mask = torch.zeros((1, *self.input_resolution, 1)) # 1 H W 1
297
+ else:
298
+ img_mask = torch.zeros((1, x.shape[1], x.shape[2], 1), dtype=x.dtype, device=x.device) # 1 H W 1
299
+ cnt = 0
300
+ for h in (
301
+ (0, -self.window_size[0]),
302
+ (-self.window_size[0], -self.shift_size[0]),
303
+ (-self.shift_size[0], None),
304
+ ):
305
+ for w in (
306
+ (0, -self.window_size[1]),
307
+ (-self.window_size[1], -self.shift_size[1]),
308
+ (-self.shift_size[1], None),
309
+ ):
310
+ img_mask[:, h[0]:h[1], w[0]:w[1], :] = cnt
311
+ cnt += 1
312
+ mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
313
+ mask_windows = mask_windows.view(-1, self.window_area)
314
+ attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
315
+ attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
316
+ else:
317
+ attn_mask = None
318
+ return attn_mask
319
+
320
+ def _calc_window_shift(
321
+ self,
322
+ target_window_size: _int_or_tuple_2_t,
323
+ target_shift_size: Optional[_int_or_tuple_2_t] = None,
324
+ ) -> Tuple[Tuple[int, int], Tuple[int, int]]:
325
+ target_window_size = to_2tuple(target_window_size)
326
+ if target_shift_size is None:
327
+ # if passed value is None, recalculate from default window_size // 2 if it was active
328
+ target_shift_size = self.target_shift_size
329
+ if any(target_shift_size):
330
+ # if there was previously a non-zero shift, recalculate based on current window_size
331
+ target_shift_size = (target_window_size[0] // 2, target_window_size[1] // 2)
332
+ else:
333
+ target_shift_size = to_2tuple(target_shift_size)
334
+
335
+ if self.always_partition:
336
+ return target_window_size, target_shift_size
337
+
338
+ target_window_size = to_2tuple(target_window_size)
339
+ target_shift_size = to_2tuple(target_shift_size)
340
+ window_size = [r if r <= w else w for r, w in zip(self.input_resolution, target_window_size)]
341
+ shift_size = [0 if r <= w else s for r, w, s in zip(self.input_resolution, window_size, target_shift_size)]
342
+ return tuple(window_size), tuple(shift_size)
343
+
344
+ def set_input_size(
345
+ self,
346
+ feat_size: Tuple[int, int],
347
+ window_size: Tuple[int, int],
348
+ always_partition: Optional[bool] = None,
349
+ ):
350
+ """ Updates the input resolution, window size.
351
+
352
+ Args:
353
+ feat_size (Tuple[int, int]): New input resolution
354
+ window_size (Tuple[int, int]): New window size
355
+ always_partition: Change always_partition attribute if not None
356
+ """
357
+ # Update input resolution
358
+ self.input_resolution = feat_size
359
+ if always_partition is not None:
360
+ self.always_partition = always_partition
361
+ self.window_size, self.shift_size = self._calc_window_shift(to_2tuple(window_size))
362
+ self.window_area = self.window_size[0] * self.window_size[1]
363
+ self.attn.set_window_size(self.window_size)
364
+ self.register_buffer(
365
+ "attn_mask",
366
+ None if self.dynamic_mask else self.get_attn_mask(),
367
+ persistent=False,
368
+ )
369
+
370
+ def _attn(self, x: torch.Tensor) -> torch.Tensor:
371
+ B, H, W, C = x.shape
372
+
373
+ # cyclic shift
374
+ has_shift = any(self.shift_size)
375
+ if has_shift:
376
+ shifted_x = torch.roll(x, shifts=(-self.shift_size[0], -self.shift_size[1]), dims=(1, 2))
377
+ else:
378
+ shifted_x = x
379
+
380
+ pad_h = (self.window_size[0] - H % self.window_size[0]) % self.window_size[0]
381
+ pad_w = (self.window_size[1] - W % self.window_size[1]) % self.window_size[1]
382
+ shifted_x = torch.nn.functional.pad(shifted_x, (0, 0, 0, pad_w, 0, pad_h))
383
+ _, Hp, Wp, _ = shifted_x.shape
384
+
385
+ # partition windows
386
+ x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
387
+ x_windows = x_windows.view(-1, self.window_area, C) # nW*B, window_size*window_size, C
388
+
389
+ # W-MSA/SW-MSA
390
+ if getattr(self, 'dynamic_mask', False):
391
+ attn_mask = self.get_attn_mask(shifted_x)
392
+ else:
393
+ attn_mask = self.attn_mask
394
+ attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C
395
+
396
+ # merge windows
397
+ attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C)
398
+ shifted_x = window_reverse(attn_windows, self.window_size, (Hp, Wp)) # B H' W' C
399
+ shifted_x = shifted_x[:, :H, :W, :].contiguous()
400
+
401
+ # reverse cyclic shift
402
+ if has_shift:
403
+ x = torch.roll(shifted_x, shifts=self.shift_size, dims=(1, 2))
404
+ else:
405
+ x = shifted_x
406
+ return x
407
+
408
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
409
+ B, H, W, C = x.shape
410
+ x = x + self.drop_path1(self.norm1(self._attn(x)))
411
+ x = x.reshape(B, -1, C)
412
+ x = x + self.drop_path2(self.norm2(self.mlp(x)))
413
+ x = x.reshape(B, H, W, C)
414
+ return x
415
+
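# A minimal NHWC round-trip sketch for the block above (illustrative; shift_size = window // 2
# gives the SW-MSA variant, shift_size = 0 the plain W-MSA variant):
import torch

blk = SwinTransformerV2Block(dim=96, input_resolution=(16, 16), num_heads=3, window_size=8, shift_size=4)
x = torch.randn(2, 16, 16, 96)   # (B, H, W, C), H/W matching input_resolution
out = blk(x)
print(out.shape)                 # torch.Size([2, 16, 16, 96])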
416
+
417
+ class PatchMerging(nn.Module):
418
+ """ Patch Merging Layer.
419
+ """
420
+
421
+ def __init__(
422
+ self,
423
+ dim: int,
424
+ out_dim: Optional[int] = None,
425
+ norm_layer: nn.Module = nn.LayerNorm
426
+ ):
427
+ """
428
+ Args:
429
+ dim (int): Number of input channels.
430
+ out_dim (int): Number of output channels (or 2 * dim if None)
431
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
432
+ """
433
+ super().__init__()
434
+ self.dim = dim
435
+ self.out_dim = out_dim or 2 * dim
436
+ self.reduction = nn.Linear(4 * dim, self.out_dim, bias=False)
437
+ self.norm = norm_layer(self.out_dim)
438
+
439
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
440
+ B, H, W, C = x.shape
441
+
442
+ pad_values = (0, 0, 0, W % 2, 0, H % 2)
443
+ x = nn.functional.pad(x, pad_values)
444
+ _, H, W, _ = x.shape
445
+
446
+ x = x.reshape(B, H // 2, 2, W // 2, 2, C).permute(0, 1, 3, 4, 2, 5).flatten(3)
447
+ x = self.reduction(x)
448
+ x = self.norm(x)
449
+ return x
450
+
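# A short shape sketch of the merge above: each 2x2 spatial neighborhood is concatenated on the
# channel dim and linearly reduced, halving H and W (illustrative):
import torch

merge = PatchMerging(dim=96)     # out_dim defaults to 2 * dim = 192
x = torch.randn(2, 16, 16, 96)
y = merge(x)
print(y.shape)                   # torch.Size([2, 8, 8, 192])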
451
+
452
+ class SwinTransformerV2Stage(nn.Module):
453
+ """ A Swin Transformer V2 Stage.
454
+ """
455
+
456
+ def __init__(
457
+ self,
458
+ dim: int,
459
+ out_dim: int,
460
+ input_resolution: _int_or_tuple_2_t,
461
+ depth: int,
462
+ num_heads: int,
463
+ window_size: _int_or_tuple_2_t,
464
+ always_partition: bool = False,
465
+ dynamic_mask: bool = False,
466
+ downsample: bool = False,
467
+ mlp_ratio: float = 4.,
468
+ qkv_bias: bool = True,
469
+ proj_drop: float = 0.,
470
+ attn_drop: float = 0.,
471
+ drop_path: float = 0.,
472
+ act_layer: Union[str, Callable] = 'gelu',
473
+ norm_layer: nn.Module = nn.LayerNorm,
474
+ pretrained_window_size: _int_or_tuple_2_t = 0,
475
+ output_nchw: bool = False,
476
+ ) -> None:
477
+ """
478
+ Args:
479
+ dim: Number of input channels.
480
+ out_dim: Number of output channels.
481
+ input_resolution: Input resolution.
482
+ depth: Number of blocks.
483
+ num_heads: Number of attention heads.
484
+ window_size: Local window size.
485
+ always_partition: Always partition into full windows and shift
486
+ dynamic_mask: Create attention mask in forward based on current input size
487
+ downsample: Use downsample layer at start of the block.
488
+ mlp_ratio: Ratio of mlp hidden dim to embedding dim.
489
+ qkv_bias: If True, add a learnable bias to query, key, value.
490
+ proj_drop: Projection dropout rate
491
+ attn_drop: Attention dropout rate.
492
+ drop_path: Stochastic depth rate.
493
+ act_layer: Activation layer type.
494
+ norm_layer: Normalization layer.
495
+ pretrained_window_size: Local window size in pretraining.
496
+ output_nchw: Output tensors on NCHW format instead of NHWC.
497
+ """
498
+ super().__init__()
499
+ self.dim = dim
500
+ self.input_resolution = input_resolution
501
+ self.output_resolution = tuple(i // 2 for i in input_resolution) if downsample else input_resolution
502
+ self.depth = depth
503
+ self.output_nchw = output_nchw
504
+ self.grad_checkpointing = False
505
+ window_size = to_2tuple(window_size)
506
+ shift_size = tuple([w // 2 for w in window_size])
507
+
508
+ # patch merging / downsample layer
509
+ if downsample:
510
+ self.downsample = PatchMerging(dim=dim, out_dim=out_dim, norm_layer=norm_layer)
511
+ else:
512
+ assert dim == out_dim
513
+ self.downsample = nn.Identity()
514
+
515
+ # build blocks
516
+ self.blocks = nn.ModuleList([
517
+ SwinTransformerV2Block(
518
+ dim=out_dim,
519
+ input_resolution=self.output_resolution,
520
+ num_heads=num_heads,
521
+ window_size=window_size,
522
+ shift_size=0 if (i % 2 == 0) else shift_size,
523
+ always_partition=always_partition,
524
+ dynamic_mask=dynamic_mask,
525
+ mlp_ratio=mlp_ratio,
526
+ qkv_bias=qkv_bias,
527
+ proj_drop=proj_drop,
528
+ attn_drop=attn_drop,
529
+ drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
530
+ act_layer=act_layer,
531
+ norm_layer=norm_layer,
532
+ pretrained_window_size=pretrained_window_size,
533
+ )
534
+ for i in range(depth)])
535
+
536
+ def set_input_size(
537
+ self,
538
+ feat_size: Tuple[int, int],
539
+ window_size: int,
540
+ always_partition: Optional[bool] = None,
541
+ ):
542
+ """ Updates the resolution, window size and so the pair-wise relative positions.
543
+
544
+ Args:
545
+ feat_size: New input (feature) resolution
546
+ window_size: New window size
547
+ always_partition: Always partition / shift the window
548
+ """
549
+ self.input_resolution = feat_size
550
+ if isinstance(self.downsample, nn.Identity):
551
+ self.output_resolution = feat_size
552
+ else:
553
+ assert isinstance(self.downsample, PatchMerging)
554
+ self.output_resolution = tuple(i // 2 for i in feat_size)
555
+ for block in self.blocks:
556
+ block.set_input_size(
557
+ feat_size=self.output_resolution,
558
+ window_size=window_size,
559
+ always_partition=always_partition,
560
+ )
561
+
562
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
563
+ x = self.downsample(x)
564
+
565
+ for blk in self.blocks:
566
+ if self.grad_checkpointing and not torch.jit.is_scripting():
567
+ x = checkpoint.checkpoint(blk, x)
568
+ else:
569
+ x = blk(x)
570
+ return x
571
+
572
+ def _init_respostnorm(self) -> None:
573
+ for blk in self.blocks:
574
+ nn.init.constant_(blk.norm1.bias, 0)
575
+ nn.init.constant_(blk.norm1.weight, 0)
576
+ nn.init.constant_(blk.norm2.bias, 0)
577
+ nn.init.constant_(blk.norm2.weight, 0)
578
+
579
+
580
+ class SwinTransformerV2(nn.Module):
581
+ """ Swin Transformer V2
582
+
583
+ A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution`
584
+ - https://arxiv.org/abs/2111.09883
585
+ """
586
+
587
+ def __init__(
588
+ self,
589
+ img_size: _int_or_tuple_2_t = 224,
590
+ patch_size: int = 4,
591
+ in_chans: int = 3,
592
+ num_classes: int = 1000,
593
+ global_pool: str = 'avg',
594
+ embed_dim: int = 96,
595
+ depths: Tuple[int, ...] = (2, 2, 6, 2),
596
+ num_heads: Tuple[int, ...] = (3, 6, 12, 24),
597
+ window_size: _int_or_tuple_2_t = 7,
598
+ always_partition: bool = False,
599
+ strict_img_size: bool = True,
600
+ mlp_ratio: float = 4.,
601
+ qkv_bias: bool = True,
602
+ drop_rate: float = 0.,
603
+ proj_drop_rate: float = 0.,
604
+ attn_drop_rate: float = 0.,
605
+ drop_path_rate: float = 0.1,
606
+ act_layer: Union[str, Callable] = 'gelu',
607
+ norm_layer: Callable = nn.LayerNorm,
608
+ pretrained_window_sizes: Tuple[int, ...] = (0, 0, 0, 0),
609
+ **kwargs,
610
+ ):
611
+ """
612
+ Args:
613
+ img_size: Input image size.
614
+ patch_size: Patch size.
615
+ in_chans: Number of input image channels.
616
+ num_classes: Number of classes for classification head.
617
+ embed_dim: Patch embedding dimension.
618
+ depths: Depth of each Swin Transformer stage (layer).
619
+ num_heads: Number of attention heads in different layers.
620
+ window_size: Window size.
621
+ mlp_ratio: Ratio of mlp hidden dim to embedding dim.
622
+ qkv_bias: If True, add a learnable bias to query, key, value.
623
+ drop_rate: Head dropout rate.
624
+ proj_drop_rate: Projection dropout rate.
625
+ attn_drop_rate: Attention dropout rate.
626
+ drop_path_rate: Stochastic depth rate.
627
+ norm_layer: Normalization layer.
628
+ act_layer: Activation layer type.
629
+ patch_norm: If True, add normalization after patch embedding.
630
+ pretrained_window_sizes: Pretrained window sizes of each layer.
631
+ output_fmt: Output tensor format if not None, otherwise output 'NHWC' by default.
632
+ """
633
+ super().__init__()
634
+
635
+ self.num_classes = num_classes
636
+ assert global_pool in ('', 'avg')
637
+ self.global_pool = global_pool
638
+ self.output_fmt = 'NHWC'
639
+ self.num_layers = len(depths)
640
+ self.embed_dim = embed_dim
641
+ self.num_features = self.head_hidden_size = int(embed_dim * 2 ** (self.num_layers - 1))
642
+ self.feature_info = []
643
+
644
+ if not isinstance(embed_dim, (tuple, list)):
645
+ embed_dim = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
646
+
647
+ # split image into non-overlapping patches
648
+ self.patch_embed = PatchEmbed(
649
+ img_size=img_size,
650
+ patch_size=patch_size,
651
+ in_chans=in_chans,
652
+ embed_dim=embed_dim[0],
653
+ norm_layer=norm_layer,
654
+ strict_img_size=strict_img_size,
655
+ output_fmt='NHWC',
656
+ )
657
+ grid_size = self.patch_embed.grid_size
658
+
659
+ dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
660
+ layers = []
661
+ in_dim = embed_dim[0]
662
+ scale = 1
663
+ for i in range(self.num_layers):
664
+ out_dim = embed_dim[i]
665
+ layers += [SwinTransformerV2Stage(
666
+ dim=in_dim,
667
+ out_dim=out_dim,
668
+ input_resolution=(grid_size[0] // scale, grid_size[1] // scale),
669
+ depth=depths[i],
670
+ downsample=i > 0,
671
+ num_heads=num_heads[i],
672
+ window_size=window_size,
673
+ always_partition=always_partition,
674
+ dynamic_mask=not strict_img_size,
675
+ mlp_ratio=mlp_ratio,
676
+ qkv_bias=qkv_bias,
677
+ proj_drop=proj_drop_rate,
678
+ attn_drop=attn_drop_rate,
679
+ drop_path=dpr[i],
680
+ act_layer=act_layer,
681
+ norm_layer=norm_layer,
682
+ pretrained_window_size=pretrained_window_sizes[i],
683
+ )]
684
+ in_dim = out_dim
685
+ if i > 0:
686
+ scale *= 2
687
+ self.feature_info += [dict(num_chs=out_dim, reduction=4 * scale, module=f'layers.{i}')]
688
+
689
+ self.layers = nn.Sequential(*layers)
690
+ self.norm = norm_layer(self.num_features)
691
+ self.head = ClassifierHead(
692
+ self.num_features,
693
+ num_classes,
694
+ pool_type=global_pool,
695
+ drop_rate=drop_rate,
696
+ input_fmt=self.output_fmt,
697
+ )
698
+
699
+ self.apply(self._init_weights)
700
+ for bly in self.layers:
701
+ bly._init_respostnorm()
702
+
703
+ def _init_weights(self, m):
704
+ if isinstance(m, nn.Linear):
705
+ trunc_normal_(m.weight, std=.02)
706
+ if isinstance(m, nn.Linear) and m.bias is not None:
707
+ nn.init.constant_(m.bias, 0)
708
+
709
+ def set_input_size(
710
+ self,
711
+ img_size: Optional[Tuple[int, int]] = None,
712
+ patch_size: Optional[Tuple[int, int]] = None,
713
+ window_size: Optional[Tuple[int, int]] = None,
714
+ window_ratio: Optional[int] = 8,
715
+ always_partition: Optional[bool] = None,
716
+ ):
717
+ """Updates the image resolution, window size, and so the pair-wise relative positions.
718
+
719
+ Args:
720
+ img_size (Optional[Tuple[int, int]]): New input resolution, if None current resolution is used
721
+ patch_size (Optional[Tuple[int, int]]): New patch size, if None use current patch size
722
+ window_size (Optional[Tuple[int, int]]): New window size, if None computed as grid_size // window_ratio
723
+ window_ratio (int): divisor for calculating window size from patch grid size
724
+ always_partition: always partition / shift windows even if feat size is < window
725
+ """
726
+ if img_size is not None or patch_size is not None:
727
+ self.patch_embed.set_input_size(img_size=img_size, patch_size=patch_size)
728
+ grid_size = self.patch_embed.grid_size
729
+
730
+ if window_size is None and window_ratio is not None:
731
+ window_size = tuple([s // window_ratio for s in grid_size])
732
+
733
+ for index, stage in enumerate(self.layers):
734
+ stage_scale = 2 ** max(index - 1, 0)
735
+ stage.set_input_size(
736
+ feat_size=(grid_size[0] // stage_scale, grid_size[1] // stage_scale),
737
+ window_size=window_size,
738
+ always_partition=always_partition,
739
+ )
740
+
741
+ @torch.jit.ignore
742
+ def no_weight_decay(self):
743
+ nod = set()
744
+ for n, m in self.named_modules():
745
+ if any([kw in n for kw in ("cpb_mlp", "logit_scale")]):
746
+ nod.add(n)
747
+ return nod
748
+
749
+ @torch.jit.ignore
750
+ def group_matcher(self, coarse=False):
751
+ return dict(
752
+ stem=r'^absolute_pos_embed|patch_embed', # stem and embed
753
+ blocks=r'^layers\.(\d+)' if coarse else [
754
+ (r'^layers\.(\d+).downsample', (0,)),
755
+ (r'^layers\.(\d+)\.\w+\.(\d+)', None),
756
+ (r'^norm', (99999,)),
757
+ ]
758
+ )
759
+
760
+ @torch.jit.ignore
761
+ def set_grad_checkpointing(self, enable=True):
762
+ for l in self.layers:
763
+ l.grad_checkpointing = enable
764
+
765
+ @torch.jit.ignore
766
+ def get_classifier(self) -> nn.Module:
767
+ return self.head.fc
768
+
769
+ def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
770
+ self.num_classes = num_classes
771
+ self.head.reset(num_classes, global_pool)
772
+
773
+ def forward_intermediates(
774
+ self,
775
+ x: torch.Tensor,
776
+ indices: Optional[Union[int, List[int]]] = None,
777
+ norm: bool = False,
778
+ stop_early: bool = False,
779
+ output_fmt: str = 'NCHW',
780
+ intermediates_only: bool = False,
781
+ ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
782
+ """ Forward features that returns intermediates.
783
+
784
+ Args:
785
+ x: Input image tensor
786
+ indices: Take last n blocks if int, all if None, select matching indices if sequence
787
+ norm: Apply norm layer to compatible intermediates
788
+ stop_early: Stop iterating over blocks when last desired intermediate hit
789
+ output_fmt: Shape of intermediate feature outputs
790
+ intermediates_only: Only return intermediate features
791
+ Returns:
792
+
793
+ """
794
+ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
795
+ intermediates = []
796
+ take_indices, max_index = feature_take_indices(len(self.layers), indices)
797
+
798
+ # forward pass
799
+ x = self.patch_embed(x)
800
+
801
+ num_stages = len(self.layers)
802
+ if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
803
+ stages = self.layers
804
+ else:
805
+ stages = self.layers[:max_index + 1]
806
+ for i, stage in enumerate(stages):
807
+ x = stage(x)
808
+ if i in take_indices:
809
+ if norm and i == num_stages - 1:
810
+ x_inter = self.norm(x) # applying final norm last intermediate
811
+ else:
812
+ x_inter = x
813
+ x_inter = x_inter.permute(0, 3, 1, 2).contiguous()
814
+ intermediates.append(x_inter)
815
+
816
+ if intermediates_only:
817
+ return intermediates
818
+
819
+ x = self.norm(x)
820
+
821
+ return x, intermediates
822
+
823
+ def prune_intermediate_layers(
824
+ self,
825
+ indices: Union[int, List[int]] = 1,
826
+ prune_norm: bool = False,
827
+ prune_head: bool = True,
828
+ ):
829
+ """ Prune layers not required for specified intermediates.
830
+ """
831
+ take_indices, max_index = feature_take_indices(len(self.layers), indices)
832
+ self.layers = self.layers[:max_index + 1] # truncate blocks
833
+ if prune_norm:
834
+ self.norm = nn.Identity()
835
+ if prune_head:
836
+ self.reset_classifier(0, '')
837
+ return take_indices
838
+
839
+ def forward_features(self, x):
840
+ x = self.patch_embed(x)
841
+ x = self.layers(x)
842
+ x = self.norm(x)
843
+ return x
844
+
845
+ def forward_head(self, x, pre_logits: bool = False):
846
+ return self.head(x, pre_logits=True) if pre_logits else self.head(x)
847
+
848
+ def forward(self, x):
849
+ x = self.forward_features(x)
850
+ x = self.forward_head(x)
851
+ return x
852
+
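# A minimal sketch of resizing inputs and pulling per-stage features from the model above
# (illustrative; the embed_dim/depths/heads here match the 'tiny' configs registered below):
import torch

model = SwinTransformerV2(img_size=256, window_size=16, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24))
model.set_input_size(img_size=(512, 512), window_ratio=8)   # patch grid 128x128 -> window 16x16
x = torch.randn(1, 3, 512, 512)
feats = model.forward_intermediates(x, intermediates_only=True)
print([f.shape for f in feats])   # NCHW feature maps at strides 4, 8, 16, 32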
853
+
854
+ def checkpoint_filter_fn(state_dict, model):
855
+ state_dict = state_dict.get('model', state_dict)
856
+ state_dict = state_dict.get('state_dict', state_dict)
857
+ native_checkpoint = 'head.fc.weight' in state_dict
858
+ out_dict = {}
859
+ import re
860
+ for k, v in state_dict.items():
861
+ if any([n in k for n in ('relative_position_index', 'relative_coords_table', 'attn_mask')]):
862
+ continue # skip buffers that should not be persistent
863
+
864
+ if 'patch_embed.proj.weight' in k:
865
+ _, _, H, W = model.patch_embed.proj.weight.shape
866
+ if v.shape[-2] != H or v.shape[-1] != W:
867
+ v = resample_patch_embed(
868
+ v,
869
+ (H, W),
870
+ interpolation='bicubic',
871
+ antialias=True,
872
+ verbose=True,
873
+ )
874
+
875
+ if not native_checkpoint:
876
+ # remap layer indices and classifier names for original checkpoints (updated native checkpoints skip this)
877
+ k = re.sub(r'layers.(\d+).downsample', lambda x: f'layers.{int(x.group(1)) + 1}.downsample', k)
878
+ k = k.replace('head.', 'head.fc.')
879
+ out_dict[k] = v
880
+
881
+ return out_dict
882
+
883
+
884
+ def _create_swin_transformer_v2(variant, pretrained=False, **kwargs):
885
+ default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 1, 1))))
886
+ out_indices = kwargs.pop('out_indices', default_out_indices)
887
+
888
+ model = build_model_with_cfg(
889
+ SwinTransformerV2, variant, pretrained,
890
+ pretrained_filter_fn=checkpoint_filter_fn,
891
+ feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
892
+ **kwargs)
893
+ return model
894
+
895
+
896
+ def _cfg(url='', **kwargs):
897
+ return {
898
+ 'url': url,
899
+ 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8),
900
+ 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
901
+ 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
902
+ 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc',
903
+ 'license': 'mit', **kwargs
904
+ }
905
+
906
+
907
+ default_cfgs = generate_default_cfgs({
908
+ 'swinv2_base_window12to16_192to256.ms_in22k_ft_in1k': _cfg(
909
+ hf_hub_id='timm/',
910
+ url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12to16_192to256_22kto1k_ft.pth',
911
+ ),
912
+ 'swinv2_base_window12to24_192to384.ms_in22k_ft_in1k': _cfg(
913
+ hf_hub_id='timm/',
914
+ url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12to24_192to384_22kto1k_ft.pth',
915
+ input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0,
916
+ ),
917
+ 'swinv2_large_window12to16_192to256.ms_in22k_ft_in1k': _cfg(
918
+ hf_hub_id='timm/',
919
+ url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12to16_192to256_22kto1k_ft.pth',
920
+ ),
921
+ 'swinv2_large_window12to24_192to384.ms_in22k_ft_in1k': _cfg(
922
+ hf_hub_id='timm/',
923
+ url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12to24_192to384_22kto1k_ft.pth',
924
+ input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0,
925
+ ),
926
+
927
+ 'swinv2_tiny_window8_256.ms_in1k': _cfg(
928
+ hf_hub_id='timm/',
929
+ url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_tiny_patch4_window8_256.pth',
930
+ ),
931
+ 'swinv2_tiny_window16_256.ms_in1k': _cfg(
932
+ hf_hub_id='timm/',
933
+ url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_tiny_patch4_window16_256.pth',
934
+ ),
935
+ 'swinv2_small_window8_256.ms_in1k': _cfg(
936
+ hf_hub_id='timm/',
937
+ url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_small_patch4_window8_256.pth',
938
+ ),
939
+ 'swinv2_small_window16_256.ms_in1k': _cfg(
940
+ hf_hub_id='timm/',
941
+ url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_small_patch4_window16_256.pth',
942
+ ),
943
+ 'swinv2_base_window8_256.ms_in1k': _cfg(
944
+ hf_hub_id='timm/',
945
+ url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window8_256.pth',
946
+ ),
947
+ 'swinv2_base_window16_256.ms_in1k': _cfg(
948
+ hf_hub_id='timm/',
949
+ url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window16_256.pth',
950
+ ),
951
+
952
+ 'swinv2_base_window12_192.ms_in22k': _cfg(
953
+ hf_hub_id='timm/',
954
+ url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12_192_22k.pth',
955
+ num_classes=21841, input_size=(3, 192, 192), pool_size=(6, 6)
956
+ ),
957
+ 'swinv2_large_window12_192.ms_in22k': _cfg(
958
+ hf_hub_id='timm/',
959
+ url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12_192_22k.pth',
960
+ num_classes=21841, input_size=(3, 192, 192), pool_size=(6, 6)
961
+ ),
962
+ })
963
+
964
+
965
+ @register_model
966
+ def swinv2_tiny_window16_256(pretrained=False, **kwargs) -> SwinTransformerV2:
967
+ """
968
+ """
969
+ model_args = dict(window_size=16, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24))
970
+ return _create_swin_transformer_v2(
971
+ 'swinv2_tiny_window16_256', pretrained=pretrained, **dict(model_args, **kwargs))
972
+
973
+
974
+ @register_model
975
+ def swinv2_tiny_window8_256(pretrained=False, **kwargs) -> SwinTransformerV2:
976
+ """
977
+ """
978
+ model_args = dict(window_size=8, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24))
979
+ return _create_swin_transformer_v2(
980
+ 'swinv2_tiny_window8_256', pretrained=pretrained, **dict(model_args, **kwargs))
981
+
982
+
983
+ @register_model
984
+ def swinv2_small_window16_256(pretrained=False, **kwargs) -> SwinTransformerV2:
985
+ """
986
+ """
987
+ model_args = dict(window_size=16, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24))
988
+ return _create_swin_transformer_v2(
989
+ 'swinv2_small_window16_256', pretrained=pretrained, **dict(model_args, **kwargs))
990
+
991
+
992
+ @register_model
993
+ def swinv2_small_window8_256(pretrained=False, **kwargs) -> SwinTransformerV2:
994
+ """
995
+ """
996
+ model_args = dict(window_size=8, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24))
997
+ return _create_swin_transformer_v2(
998
+ 'swinv2_small_window8_256', pretrained=pretrained, **dict(model_args, **kwargs))
999
+
1000
+
1001
+ @register_model
1002
+ def swinv2_base_window16_256(pretrained=False, **kwargs) -> SwinTransformerV2:
1003
+ """
1004
+ """
1005
+ model_args = dict(window_size=16, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32))
1006
+ return _create_swin_transformer_v2(
1007
+ 'swinv2_base_window16_256', pretrained=pretrained, **dict(model_args, **kwargs))
1008
+
1009
+
1010
+ @register_model
1011
+ def swinv2_base_window8_256(pretrained=False, **kwargs) -> SwinTransformerV2:
1012
+ """
1013
+ """
1014
+ model_args = dict(window_size=8, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32))
1015
+ return _create_swin_transformer_v2(
1016
+ 'swinv2_base_window8_256', pretrained=pretrained, **dict(model_args, **kwargs))
1017
+
1018
+
1019
+ @register_model
1020
+ def swinv2_base_window12_192(pretrained=False, **kwargs) -> SwinTransformerV2:
1021
+ """
1022
+ """
1023
+ model_args = dict(window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32))
1024
+ return _create_swin_transformer_v2(
1025
+ 'swinv2_base_window12_192', pretrained=pretrained, **dict(model_args, **kwargs))
1026
+
1027
+
1028
+ @register_model
1029
+ def swinv2_base_window12to16_192to256(pretrained=False, **kwargs) -> SwinTransformerV2:
1030
+ """
1031
+ """
1032
+ model_args = dict(
1033
+ window_size=16, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32),
1034
+ pretrained_window_sizes=(12, 12, 12, 6))
1035
+ return _create_swin_transformer_v2(
1036
+ 'swinv2_base_window12to16_192to256', pretrained=pretrained, **dict(model_args, **kwargs))
1037
+
1038
+
1039
+ @register_model
1040
+ def swinv2_base_window12to24_192to384(pretrained=False, **kwargs) -> SwinTransformerV2:
1041
+ """
1042
+ """
1043
+ model_args = dict(
1044
+ window_size=24, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32),
1045
+ pretrained_window_sizes=(12, 12, 12, 6))
1046
+ return _create_swin_transformer_v2(
1047
+ 'swinv2_base_window12to24_192to384', pretrained=pretrained, **dict(model_args, **kwargs))
1048
+
1049
+
1050
+ @register_model
1051
+ def swinv2_large_window12_192(pretrained=False, **kwargs) -> SwinTransformerV2:
1052
+ """
1053
+ """
1054
+ model_args = dict(window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48))
1055
+ return _create_swin_transformer_v2(
1056
+ 'swinv2_large_window12_192', pretrained=pretrained, **dict(model_args, **kwargs))
1057
+
1058
+
1059
+ @register_model
1060
+ def swinv2_large_window12to16_192to256(pretrained=False, **kwargs) -> SwinTransformerV2:
1061
+ """
1062
+ """
1063
+ model_args = dict(
1064
+ window_size=16, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48),
1065
+ pretrained_window_sizes=(12, 12, 12, 6))
1066
+ return _create_swin_transformer_v2(
1067
+ 'swinv2_large_window12to16_192to256', pretrained=pretrained, **dict(model_args, **kwargs))
1068
+
1069
+
1070
+ @register_model
1071
+ def swinv2_large_window12to24_192to384(pretrained=False, **kwargs) -> SwinTransformerV2:
1072
+ """
1073
+ """
1074
+ model_args = dict(
1075
+ window_size=24, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48),
1076
+ pretrained_window_sizes=(12, 12, 12, 6))
1077
+ return _create_swin_transformer_v2(
1078
+ 'swinv2_large_window12to24_192to384', pretrained=pretrained, **dict(model_args, **kwargs))
1079
+
1080
+
1081
+ register_model_deprecations(__name__, {
1082
+ 'swinv2_base_window12_192_22k': 'swinv2_base_window12_192.ms_in22k',
1083
+ 'swinv2_base_window12to16_192to256_22kft1k': 'swinv2_base_window12to16_192to256.ms_in22k_ft_in1k',
1084
+ 'swinv2_base_window12to24_192to384_22kft1k': 'swinv2_base_window12to24_192to384.ms_in22k_ft_in1k',
1085
+ 'swinv2_large_window12_192_22k': 'swinv2_large_window12_192.ms_in22k',
1086
+ 'swinv2_large_window12to16_192to256_22kft1k': 'swinv2_large_window12to16_192to256.ms_in22k_ft_in1k',
1087
+ 'swinv2_large_window12to24_192to384_22kft1k': 'swinv2_large_window12to24_192to384.ms_in22k_ft_in1k',
1088
+ })
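# A minimal sketch of creating one of the registered entrypoints via the timm factory
# (illustrative; assumes timm is installed, set pretrained=True to pull the released '.ms_in1k' weights):
import timm
import torch

model = timm.create_model('swinv2_tiny_window16_256', pretrained=False)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 256, 256))   # -> shape (1, 1000)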
pytorch-image-models/timm/models/swin_transformer_v2_cr.py ADDED
@@ -0,0 +1,1153 @@
1
+ """ Swin Transformer V2
2
+
3
+ A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution`
4
+ - https://arxiv.org/pdf/2111.09883
5
+
6
+ Code adapted from https://github.com/ChristophReich1996/Swin-Transformer-V2, original copyright/license info below
7
+
8
+ This implementation is experimental and subject to change in manners that will break weight compat:
9
+ * The size of the pos embed MLP is not spelled out in the paper in terms of dim; fixed for all models? vary with num_heads?
10
+ * currently dim is fixed, I feel it may make sense to scale with num_heads (dim per head)
11
+ * The specifics of the memory-saving 'sequential attention' are not detailed; Christoph Reich has an impl at the
12
+ GitHub link above. It needs further investigation as throughput vs mem tradeoff doesn't appear beneficial.
13
+ * num_heads per stage is not detailed for Huge and Giant model variants
14
+ * 'Giant' is 3B params in paper but ~2.6B here despite matching paper dim + block counts
15
+ * experiments are ongoing w.r.t. 'main branch' norm layer use and weight init scheme
16
+
17
+ Noteworthy additions over official Swin v1:
18
+ * MLP relative position embedding is looking promising and adapts to different image/window sizes
19
+ * This impl has been designed to allow easy change of image size with matching window size changes
20
+ * Non-square image size and window size are supported
21
+
22
+ Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman
23
+ """
24
+ # --------------------------------------------------------
25
+ # Swin Transformer V2 reimplementation
26
+ # Copyright (c) 2021 Christoph Reich
27
+ # Licensed under The MIT License [see LICENSE for details]
28
+ # Written by Christoph Reich
29
+ # --------------------------------------------------------
30
+ import logging
31
+ import math
32
+ from typing import Tuple, Optional, List, Union, Any, Type
33
+
34
+ import torch
35
+ import torch.nn as nn
36
+ import torch.nn.functional as F
37
+ import torch.utils.checkpoint as checkpoint
38
+
39
+ from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
40
+ from timm.layers import DropPath, Mlp, ClassifierHead, to_2tuple, _assert, ndgrid
41
+ from ._builder import build_model_with_cfg
42
+ from ._features import feature_take_indices
43
+ from ._features_fx import register_notrace_function
44
+ from ._manipulate import named_apply
45
+ from ._registry import generate_default_cfgs, register_model
46
+
47
+ __all__ = ['SwinTransformerV2Cr'] # model_registry will add each entrypoint fn to this
48
+
49
+ _logger = logging.getLogger(__name__)
50
+
51
+
52
+ def bchw_to_bhwc(x: torch.Tensor) -> torch.Tensor:
53
+ """Permutes a tensor from the shape (B, C, H, W) to (B, H, W, C). """
54
+ return x.permute(0, 2, 3, 1)
55
+
56
+
57
+ def bhwc_to_bchw(x: torch.Tensor) -> torch.Tensor:
58
+ """Permutes a tensor from the shape (B, H, W, C) to (B, C, H, W). """
59
+ return x.permute(0, 3, 1, 2)
60
+
61
+
62
+ def window_partition(x, window_size: Tuple[int, int]):
63
+ """
64
+ Args:
65
+ x: (B, H, W, C)
66
+ window_size (Tuple[int, int]): window size
67
+
68
+ Returns:
69
+ windows: (num_windows*B, window_size, window_size, C)
70
+ """
71
+ B, H, W, C = x.shape
72
+ x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
73
+ windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
74
+ return windows
75
+
76
+
77
+ @register_notrace_function # reason: int argument is a Proxy
78
+ def window_reverse(windows, window_size: Tuple[int, int], img_size: Tuple[int, int]):
79
+ """
80
+ Args:
81
+ windows: (num_windows * B, window_size[0], window_size[1], C)
82
+ window_size (Tuple[int, int]): Window size
83
+ img_size (Tuple[int, int]): Image size
84
+
85
+ Returns:
86
+ x: (B, H, W, C)
87
+ """
88
+ H, W = img_size
89
+ C = windows.shape[-1]
90
+ x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C)
91
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C)
92
+ return x
93
+
94
+
95
+ class WindowMultiHeadAttention(nn.Module):
96
+ r"""This class implements window-based Multi-Head-Attention with log-spaced continuous position bias.
97
+
98
+ Args:
99
+ dim (int): Number of input features
100
+ window_size (int): Window size
101
+ num_heads (int): Number of attention heads
102
+ drop_attn (float): Dropout rate of attention map
103
+ drop_proj (float): Dropout rate after projection
104
+ meta_hidden_dim (int): Number of hidden features in the two layer MLP meta network
105
+ sequential_attn (bool): If true sequential self-attention is performed
106
+ """
107
+
108
+ def __init__(
109
+ self,
110
+ dim: int,
111
+ num_heads: int,
112
+ window_size: Tuple[int, int],
113
+ drop_attn: float = 0.0,
114
+ drop_proj: float = 0.0,
115
+ meta_hidden_dim: int = 384, # FIXME what's the optimal value?
116
+ sequential_attn: bool = False,
117
+ ) -> None:
118
+ super(WindowMultiHeadAttention, self).__init__()
119
+ assert dim % num_heads == 0, \
120
+ "The number of input features (in_features) are not divisible by the number of heads (num_heads)."
121
+ self.in_features: int = dim
122
+ self.window_size: Tuple[int, int] = to_2tuple(window_size)
123
+ self.num_heads: int = num_heads
124
+ self.sequential_attn: bool = sequential_attn
125
+
126
+ self.qkv = nn.Linear(in_features=dim, out_features=dim * 3, bias=True)
127
+ self.attn_drop = nn.Dropout(drop_attn)
128
+ self.proj = nn.Linear(in_features=dim, out_features=dim, bias=True)
129
+ self.proj_drop = nn.Dropout(drop_proj)
130
+ # meta network for positional encodings
131
+ self.meta_mlp = Mlp(
132
+ 2, # x, y
133
+ hidden_features=meta_hidden_dim,
134
+ out_features=num_heads,
135
+ act_layer=nn.ReLU,
136
+ drop=(0.125, 0.) # FIXME should there be stochasticity, appears to 'overfit' without?
137
+ )
138
+ # NOTE old checkpoints used inverse of logit_scale ('tau') following the paper, see conversion fn
139
+ self.logit_scale = nn.Parameter(torch.log(10 * torch.ones(num_heads)))
140
+ self._make_pair_wise_relative_positions()
141
+
142
+ def _make_pair_wise_relative_positions(self) -> None:
143
+ """Method initializes the pair-wise relative positions to compute the positional biases."""
144
+ device = self.logit_scale.device
145
+ coordinates = torch.stack(ndgrid(
146
+ torch.arange(self.window_size[0], device=device),
147
+ torch.arange(self.window_size[1], device=device)
148
+ ), dim=0).flatten(1)
149
+ relative_coordinates = coordinates[:, :, None] - coordinates[:, None, :]
150
+ relative_coordinates = relative_coordinates.permute(1, 2, 0).reshape(-1, 2).float()
151
+ relative_coordinates_log = torch.sign(relative_coordinates) * torch.log(
152
+ 1.0 + relative_coordinates.abs())
153
+ self.register_buffer("relative_coordinates_log", relative_coordinates_log, persistent=False)
154
+
155
+ def set_window_size(self, window_size: Tuple[int, int]) -> None:
156
+ """Update window size & interpolate position embeddings
157
+ Args:
158
+ window_size (Tuple[int, int]): New window size
159
+ """
160
+ window_size = to_2tuple(window_size)
161
+ if window_size != self.window_size:
162
+ self.window_size = window_size
163
+ self._make_pair_wise_relative_positions()
164
+
165
+ def _relative_positional_encodings(self) -> torch.Tensor:
166
+ """Method computes the relative positional encodings
167
+
168
+ Returns:
169
+ relative_position_bias (torch.Tensor): Relative positional encodings
170
+ (1, number of heads, window size ** 2, window size ** 2)
171
+ """
172
+ window_area = self.window_size[0] * self.window_size[1]
173
+ relative_position_bias = self.meta_mlp(self.relative_coordinates_log)
174
+ relative_position_bias = relative_position_bias.transpose(1, 0).reshape(
175
+ self.num_heads, window_area, window_area
176
+ )
177
+ relative_position_bias = relative_position_bias.unsqueeze(0)
178
+ return relative_position_bias
179
+
180
+ def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
181
+ """ Forward pass.
182
+ Args:
183
+ x (torch.Tensor): Input tensor of the shape (B * windows, N, C)
184
+ mask (Optional[torch.Tensor]): Attention mask for the shift case
185
+
186
+ Returns:
187
+ Output tensor of the shape [B * windows, N, C]
188
+ """
189
+ Bw, L, C = x.shape
190
+
191
+ qkv = self.qkv(x).view(Bw, L, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
192
+ query, key, value = qkv.unbind(0)
193
+
194
+ # compute attention map with scaled cosine attention
195
+ attn = (F.normalize(query, dim=-1) @ F.normalize(key, dim=-1).transpose(-2, -1))
196
+ logit_scale = torch.clamp(self.logit_scale.reshape(1, self.num_heads, 1, 1), max=math.log(1. / 0.01)).exp()
197
+ attn = attn * logit_scale
198
+ attn = attn + self._relative_positional_encodings()
199
+
200
+ if mask is not None:
201
+ # Apply mask if utilized
202
+ num_win: int = mask.shape[0]
203
+ attn = attn.view(Bw // num_win, num_win, self.num_heads, L, L)
204
+ attn = attn + mask.unsqueeze(1).unsqueeze(0)
205
+ attn = attn.view(-1, self.num_heads, L, L)
206
+ attn = attn.softmax(dim=-1)
207
+ attn = self.attn_drop(attn)
208
+
209
+ x = (attn @ value).transpose(1, 2).reshape(Bw, L, -1)
210
+ x = self.proj(x)
211
+ x = self.proj_drop(x)
212
+ return x
213
+
214
+
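A condensed sketch of the scaled cosine attention used above (illustrative only, not part of the committed file); the shapes follow the forward() docstring and the logit_scale clamp mirrors the module code.

    import math
    import torch
    import torch.nn.functional as F
    Bw, heads, L, d = 4, 3, 16, 8                    # windows * B, heads, tokens, head dim
    q, k = torch.randn(Bw, heads, L, d), torch.randn(Bw, heads, L, d)
    logit_scale = torch.log(10 * torch.ones(heads))  # learnable per-head scale in the module
    attn = F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)
    attn = attn * torch.clamp(logit_scale.view(1, heads, 1, 1), max=math.log(1. / 0.01)).exp()
    attn = attn.softmax(dim=-1)                      # (Bw, heads, L, L), rows sum to 1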
215
+ class SwinTransformerV2CrBlock(nn.Module):
216
+ r"""This class implements the Swin transformer block.
217
+
218
+ Args:
219
+ dim (int): Number of input channels
220
+ num_heads (int): Number of attention heads to be utilized
221
+ feat_size (Tuple[int, int]): Input resolution
222
+ window_size (Tuple[int, int]): Window size to be utilized
223
+ shift_size (int): Shifting size to be used
224
+ mlp_ratio (int): Ratio of the hidden dimension in the FFN to the input channels
225
+ proj_drop (float): Dropout in input mapping
226
+ drop_attn (float): Dropout rate of attention map
227
+ drop_path (float): Dropout in main path
228
+ extra_norm (bool): Insert extra norm on 'main' branch if True
229
+ sequential_attn (bool): If true sequential self-attention is performed
230
+ norm_layer (Type[nn.Module]): Type of normalization layer to be utilized
231
+ """
232
+
233
+ def __init__(
234
+ self,
235
+ dim: int,
236
+ num_heads: int,
237
+ feat_size: Tuple[int, int],
238
+ window_size: Tuple[int, int],
239
+ shift_size: Tuple[int, int] = (0, 0),
240
+ always_partition: bool = False,
241
+ dynamic_mask: bool = False,
242
+ mlp_ratio: float = 4.0,
243
+ init_values: Optional[float] = 0,
244
+ proj_drop: float = 0.0,
245
+ drop_attn: float = 0.0,
246
+ drop_path: float = 0.0,
247
+ extra_norm: bool = False,
248
+ sequential_attn: bool = False,
249
+ norm_layer: Type[nn.Module] = nn.LayerNorm,
250
+ ):
251
+ super(SwinTransformerV2CrBlock, self).__init__()
252
+ self.dim: int = dim
253
+ self.feat_size: Tuple[int, int] = feat_size
254
+ self.target_shift_size: Tuple[int, int] = to_2tuple(shift_size)
255
+ self.always_partition = always_partition
256
+ self.dynamic_mask = dynamic_mask
257
+ self.window_size, self.shift_size = self._calc_window_shift(window_size)
258
+ self.window_area = self.window_size[0] * self.window_size[1]
259
+ self.init_values: Optional[float] = init_values
260
+
261
+ # attn branch
262
+ self.attn = WindowMultiHeadAttention(
263
+ dim=dim,
264
+ num_heads=num_heads,
265
+ window_size=self.window_size,
266
+ drop_attn=drop_attn,
267
+ drop_proj=proj_drop,
268
+ sequential_attn=sequential_attn,
269
+ )
270
+ self.norm1 = norm_layer(dim)
271
+ self.drop_path1 = DropPath(drop_prob=drop_path) if drop_path > 0.0 else nn.Identity()
272
+
273
+ # mlp branch
274
+ self.mlp = Mlp(
275
+ in_features=dim,
276
+ hidden_features=int(dim * mlp_ratio),
277
+ drop=proj_drop,
278
+ out_features=dim,
279
+ )
280
+ self.norm2 = norm_layer(dim)
281
+ self.drop_path2 = DropPath(drop_prob=drop_path) if drop_path > 0.0 else nn.Identity()
282
+
283
+ # Extra main branch norm layer mentioned for Huge/Giant models in V2 paper.
284
+ # Also being used as final network norm and optional stage ending norm while still in a C-last format.
285
+ self.norm3 = norm_layer(dim) if extra_norm else nn.Identity()
286
+
287
+ self.register_buffer(
288
+ "attn_mask",
289
+ None if self.dynamic_mask else self.get_attn_mask(),
290
+ persistent=False,
291
+ )
292
+ self.init_weights()
293
+
294
+ def _calc_window_shift(
295
+ self,
296
+ target_window_size: Tuple[int, int],
297
+ ) -> Tuple[Tuple[int, int], Tuple[int, int]]:
298
+ target_window_size = to_2tuple(target_window_size)
299
+ target_shift_size = self.target_shift_size
300
+ if any(target_shift_size):
301
+ # if non-zero, recalculate shift from current window size in case window size has changed
302
+ target_shift_size = (target_window_size[0] // 2, target_window_size[1] // 2)
303
+
304
+ if self.always_partition:
305
+ return target_window_size, target_shift_size
306
+
307
+ window_size = [f if f <= w else w for f, w in zip(self.feat_size, target_window_size)]
308
+ shift_size = [0 if f <= w else s for f, w, s in zip(self.feat_size, window_size, target_shift_size)]
309
+ return tuple(window_size), tuple(shift_size)
310
+
311
+ def get_attn_mask(self, x: Optional[torch.Tensor] = None) -> Optional[torch.Tensor]:
312
+ """Method generates the attention mask used in shift case."""
313
+ # Make masks for shift case
314
+ if any(self.shift_size):
315
+ # calculate attention mask for SW-MSA
316
+ if x is None:
317
+ img_mask = torch.zeros((1, *self.feat_size, 1)) # 1 H W 1
318
+ else:
319
+ img_mask = torch.zeros((1, x.shape[1], x.shape[2], 1), dtype=x.dtype, device=x.device) # 1 H W 1
320
+ cnt = 0
321
+ for h in (
322
+ (0, -self.window_size[0]),
323
+ (-self.window_size[0], -self.shift_size[0]),
324
+ (-self.shift_size[0], None),
325
+ ):
326
+ for w in (
327
+ (0, -self.window_size[1]),
328
+ (-self.window_size[1], -self.shift_size[1]),
329
+ (-self.shift_size[1], None),
330
+ ):
331
+ img_mask[:, h[0]:h[1], w[0]:w[1], :] = cnt
332
+ cnt += 1
333
+ mask_windows = window_partition(img_mask, self.window_size) # num_windows, window_size, window_size, 1
334
+ mask_windows = mask_windows.view(-1, self.window_area)
335
+ attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
336
+ attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
337
+ else:
338
+ attn_mask = None
339
+ return attn_mask
340
+
341
+ def init_weights(self):
342
+ # extra, module specific weight init
343
+ if self.init_values is not None:
344
+ nn.init.constant_(self.norm1.weight, self.init_values)
345
+ nn.init.constant_(self.norm2.weight, self.init_values)
346
+
347
+ def set_input_size(self, feat_size: Tuple[int, int], window_size: Tuple[int, int]) -> None:
348
+ """Method updates the image resolution to be processed and window size and so the pair-wise relative positions.
349
+
350
+ Args:
351
+ feat_size (Tuple[int, int]): New input resolution
352
+ window_size (Tuple[int, int]): New window size
353
+ """
354
+ # Update input resolution
355
+ self.feat_size: Tuple[int, int] = feat_size
356
+ self.window_size, self.shift_size = self._calc_window_shift(to_2tuple(window_size))
357
+ self.window_area = self.window_size[0] * self.window_size[1]
358
+ self.attn.set_window_size(self.window_size)
359
+ self.register_buffer(
360
+ "attn_mask",
361
+ None if self.dynamic_mask else self.get_attn_mask(),
362
+ persistent=False,
363
+ )
364
+
365
+ def _shifted_window_attn(self, x):
366
+ B, H, W, C = x.shape
367
+
368
+ # cyclic shift
369
+ sh, sw = self.shift_size
370
+ do_shift: bool = any(self.shift_size)
371
+ if do_shift:
372
+ # FIXME PyTorch XLA needs cat impl, roll not lowered
373
+ # x = torch.cat([x[:, sh:], x[:, :sh]], dim=1)
374
+ # x = torch.cat([x[:, :, sw:], x[:, :, :sw]], dim=2)
375
+ x = torch.roll(x, shifts=(-sh, -sw), dims=(1, 2))
376
+
377
+ pad_h = (self.window_size[0] - H % self.window_size[0]) % self.window_size[0]
378
+ pad_w = (self.window_size[1] - W % self.window_size[1]) % self.window_size[1]
379
+ x = torch.nn.functional.pad(x, (0, 0, 0, pad_w, 0, pad_h))
380
+ _, Hp, Wp, _ = x.shape
381
+
382
+ # partition windows
383
+ x_windows = window_partition(x, self.window_size) # num_windows * B, window_size, window_size, C
384
+ x_windows = x_windows.view(-1, self.window_size[0] * self.window_size[1], C)
385
+
386
+ # W-MSA/SW-MSA
387
+ if getattr(self, 'dynamic_mask', False):
388
+ attn_mask = self.get_attn_mask(x)
389
+ else:
390
+ attn_mask = self.attn_mask
391
+ attn_windows = self.attn(x_windows, mask=attn_mask) # num_windows * B, window_size * window_size, C
392
+
393
+ # merge windows
394
+ attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C)
395
+ x = window_reverse(attn_windows, self.window_size, (Hp, Wp)) # B H' W' C
396
+ x = x[:, :H, :W, :].contiguous()
397
+
398
+ # reverse cyclic shift
399
+ if do_shift:
400
+ # FIXME PyTorch XLA needs cat impl, roll not lowered
401
+ # x = torch.cat([x[:, -sh:], x[:, :-sh]], dim=1)
402
+ # x = torch.cat([x[:, :, -sw:], x[:, :, :-sw]], dim=2)
403
+ x = torch.roll(x, shifts=(sh, sw), dims=(1, 2))
404
+
405
+ return x
406
+
407
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
408
+ """Forward pass.
409
+
410
+ Args:
411
+ x (torch.Tensor): Input tensor of the shape [B, H, W, C]
412
+
413
+ Returns:
414
+ output (torch.Tensor): Output tensor of the shape [B, H, W, C]
415
+ """
416
+ # post-norm branches (op -> norm -> drop)
417
+ x = x + self.drop_path1(self.norm1(self._shifted_window_attn(x)))
418
+
419
+ B, H, W, C = x.shape
420
+ x = x.reshape(B, -1, C)
421
+ x = x + self.drop_path2(self.norm2(self.mlp(x)))
422
+ x = self.norm3(x) # main-branch norm enabled for some blocks / stages (every 6 for Huge/Giant)
423
+ x = x.reshape(B, H, W, C)
424
+ return x
425
+
426
+
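A quick shape check for the block (illustrative only, not part of the committed file); note that inside the stages tensors are channels-last.

    import torch
    block = SwinTransformerV2CrBlock(
        dim=96, num_heads=3, feat_size=(8, 8), window_size=(4, 4), shift_size=(2, 2))
    x = torch.randn(2, 8, 8, 96)     # (B, H, W, C)
    assert block(x).shape == x.shape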
427
+ class PatchMerging(nn.Module):
428
+ """ This class implements the patch merging as a strided convolution with a normalization before.
429
+ Args:
430
+ dim (int): Number of input channels
431
+ norm_layer (Type[nn.Module]): Type of normalization layer to be utilized.
432
+ """
433
+
434
+ def __init__(self, dim: int, norm_layer: Type[nn.Module] = nn.LayerNorm) -> None:
435
+ super(PatchMerging, self).__init__()
436
+ self.norm = norm_layer(4 * dim)
437
+ self.reduction = nn.Linear(in_features=4 * dim, out_features=2 * dim, bias=False)
438
+
439
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
440
+ """ Forward pass.
441
+ Args:
442
+ x (torch.Tensor): Input tensor of the shape [B, H, W, C]
443
+ Returns:
444
+ output (torch.Tensor): Output tensor of the shape [B, H // 2, W // 2, 2 * C]
445
+ """
446
+ B, H, W, C = x.shape
447
+
448
+ pad_values = (0, 0, 0, W % 2, 0, H % 2)
449
+ x = nn.functional.pad(x, pad_values)
450
+ _, H, W, _ = x.shape
451
+
452
+ x = x.reshape(B, H // 2, 2, W // 2, 2, C).permute(0, 1, 3, 4, 2, 5).flatten(3)
453
+ x = self.norm(x)
454
+ x = self.reduction(x)
455
+ return x
456
+
457
+
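Patch merging halves the grid and doubles the channels; a minimal check (illustrative only, not part of the committed file):

    import torch
    merge = PatchMerging(dim=96)
    x = torch.randn(2, 8, 8, 96)             # (B, H, W, C)
    assert merge(x).shape == (2, 4, 4, 192)  # (B, H // 2, W // 2, 2 * C)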
458
+ class PatchEmbed(nn.Module):
459
+ """ 2D Image to Patch Embedding """
460
+ def __init__(
461
+ self,
462
+ img_size=224,
463
+ patch_size=16,
464
+ in_chans=3,
465
+ embed_dim=768,
466
+ norm_layer=None,
467
+ strict_img_size=True,
468
+ ):
469
+ super().__init__()
470
+ img_size = to_2tuple(img_size)
471
+ patch_size = to_2tuple(patch_size)
472
+ self.img_size = img_size
473
+ self.patch_size = patch_size
474
+ self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
475
+ self.num_patches = self.grid_size[0] * self.grid_size[1]
476
+ self.strict_img_size = strict_img_size
477
+
478
+ self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
479
+ self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
480
+
481
+ def set_input_size(self, img_size: Tuple[int, int]):
482
+ img_size = to_2tuple(img_size)
483
+ if img_size != self.img_size:
484
+ self.img_size = img_size
485
+ self.grid_size = (img_size[0] // self.patch_size[0], img_size[1] // self.patch_size[1])
486
+ self.num_patches = self.grid_size[0] * self.grid_size[1]
487
+
488
+ def forward(self, x):
489
+ B, C, H, W = x.shape
490
+ if self.strict_img_size:
491
+ _assert(H == self.img_size[0], f"Input image height ({H}) doesn't match model ({self.img_size[0]}).")
492
+ _assert(W == self.img_size[1], f"Input image width ({W}) doesn't match model ({self.img_size[1]}).")
493
+ x = self.proj(x)
494
+ x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
495
+ return x
496
+
497
+
498
+ class SwinTransformerV2CrStage(nn.Module):
499
+ r"""This class implements a stage of the Swin transformer including multiple layers.
500
+
501
+ Args:
502
+ embed_dim (int): Number of input channels
503
+ depth (int): Depth of the stage (number of layers)
504
+ downscale (bool): If true input is downsampled (see Fig. 3 of V1 paper)
505
+ feat_size (Tuple[int, int]): input feature map size (H, W)
506
+ num_heads (int): Number of attention heads to be utilized
507
+ window_size (int): Window size to be utilized
508
+ mlp_ratio (int): Ratio of the hidden dimension in the FFN to the input channels
509
+ proj_drop (float): Dropout in input mapping
510
+ drop_attn (float): Dropout rate of attention map
511
+ drop_path (float): Dropout in main path
512
+ norm_layer (Type[nn.Module]): Type of normalization layer to be utilized. Default: nn.LayerNorm
513
+ extra_norm_period (int): Insert extra norm layer on main branch every N (period) blocks
514
+ extra_norm_stage (bool): End each stage with an extra norm layer in main branch
515
+ sequential_attn (bool): If true sequential self-attention is performed
516
+ """
517
+
518
+ def __init__(
519
+ self,
520
+ embed_dim: int,
521
+ depth: int,
522
+ downscale: bool,
523
+ num_heads: int,
524
+ feat_size: Tuple[int, int],
525
+ window_size: Tuple[int, int],
526
+ always_partition: bool = False,
527
+ dynamic_mask: bool = False,
528
+ mlp_ratio: float = 4.0,
529
+ init_values: Optional[float] = 0.0,
530
+ proj_drop: float = 0.0,
531
+ drop_attn: float = 0.0,
532
+ drop_path: Union[List[float], float] = 0.0,
533
+ norm_layer: Type[nn.Module] = nn.LayerNorm,
534
+ extra_norm_period: int = 0,
535
+ extra_norm_stage: bool = False,
536
+ sequential_attn: bool = False,
537
+ ):
538
+ super(SwinTransformerV2CrStage, self).__init__()
539
+ self.downscale: bool = downscale
540
+ self.grad_checkpointing: bool = False
541
+ self.feat_size: Tuple[int, int] = (feat_size[0] // 2, feat_size[1] // 2) if downscale else feat_size
542
+
543
+ if downscale:
544
+ self.downsample = PatchMerging(embed_dim, norm_layer=norm_layer)
545
+ embed_dim = embed_dim * 2
546
+ else:
547
+ self.downsample = nn.Identity()
548
+
549
+ def _extra_norm(index):
550
+ i = index + 1
551
+ if extra_norm_period and i % extra_norm_period == 0:
552
+ return True
553
+ return i == depth if extra_norm_stage else False
554
+
555
+ self.blocks = nn.Sequential(*[
556
+ SwinTransformerV2CrBlock(
557
+ dim=embed_dim,
558
+ num_heads=num_heads,
559
+ feat_size=self.feat_size,
560
+ window_size=window_size,
561
+ always_partition=always_partition,
562
+ dynamic_mask=dynamic_mask,
563
+ shift_size=tuple([0 if ((index % 2) == 0) else w // 2 for w in window_size]),
564
+ mlp_ratio=mlp_ratio,
565
+ init_values=init_values,
566
+ proj_drop=proj_drop,
567
+ drop_attn=drop_attn,
568
+ drop_path=drop_path[index] if isinstance(drop_path, list) else drop_path,
569
+ extra_norm=_extra_norm(index),
570
+ sequential_attn=sequential_attn,
571
+ norm_layer=norm_layer,
572
+ )
573
+ for index in range(depth)]
574
+ )
575
+
576
+ def set_input_size(
577
+ self,
578
+ feat_size: Tuple[int, int],
579
+ window_size: int,
580
+ always_partition: Optional[bool] = None,
581
+ ):
582
+ """ Updates the resolution to utilize and the window size and so the pair-wise relative positions.
583
+
584
+ Args:
585
+ window_size (int): New window size
586
+ feat_size (Tuple[int, int]): New input resolution
587
+ """
588
+ self.feat_size = (feat_size[0] // 2, feat_size[1] // 2) if self.downscale else feat_size
589
+ for block in self.blocks:
590
+ block.set_input_size(
591
+ feat_size=self.feat_size,
592
+ window_size=window_size,
593
+ )
594
+
595
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
596
+ """Forward pass.
597
+ Args:
598
+ x (torch.Tensor): Input tensor of the shape [B, C, H, W]
599
+ Returns:
600
+ output (torch.Tensor): Output tensor of the shape [B, 2 * C, H // 2, W // 2] if downscale, else [B, C, H, W]
601
+ """
602
+ x = bchw_to_bhwc(x)
603
+ x = self.downsample(x)
604
+ for block in self.blocks:
605
+ # Perform checkpointing if utilized
606
+ if self.grad_checkpointing and not torch.jit.is_scripting():
607
+ x = checkpoint.checkpoint(block, x)
608
+ else:
609
+ x = block(x)
610
+ x = bhwc_to_bchw(x)
611
+ return x
612
+
613
+
614
+ class SwinTransformerV2Cr(nn.Module):
615
+ r""" Swin Transformer V2
616
+ A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution` -
617
+ https://arxiv.org/pdf/2111.09883
618
+
619
+ Args:
620
+ img_size: Input resolution.
621
+ window_size: Window size. If None, grid_size // window_ratio
622
+ window_ratio: Window size to patch grid ratio.
623
+ patch_size: Patch size.
624
+ in_chans: Number of input channels.
625
+ depths: Depth of the stage (number of layers).
626
+ num_heads: Number of attention heads to be utilized.
627
+ embed_dim: Patch embedding dimension.
628
+ num_classes: Number of output classes.
629
+ mlp_ratio: Ratio of the hidden dimension in the FFN to the input channels.
630
+ drop_rate: Dropout rate.
631
+ proj_drop_rate: Projection dropout rate.
632
+ attn_drop_rate: Dropout rate of attention map.
633
+ drop_path_rate: Stochastic depth rate.
634
+ norm_layer: Type of normalization layer to be utilized.
635
+ extra_norm_period: Insert extra norm layer on main branch every N (period) blocks in stage
636
+ extra_norm_stage: End each stage with an extra norm layer in main branch
637
+ sequential_attn: If true sequential self-attention is performed.
638
+ """
639
+
640
+ def __init__(
641
+ self,
642
+ img_size: Tuple[int, int] = (224, 224),
643
+ patch_size: int = 4,
644
+ window_size: Optional[int] = None,
645
+ window_ratio: int = 8,
646
+ always_partition: bool = False,
647
+ strict_img_size: bool = True,
648
+ in_chans: int = 3,
649
+ num_classes: int = 1000,
650
+ embed_dim: int = 96,
651
+ depths: Tuple[int, ...] = (2, 2, 6, 2),
652
+ num_heads: Tuple[int, ...] = (3, 6, 12, 24),
653
+ mlp_ratio: float = 4.0,
654
+ init_values: Optional[float] = 0.,
655
+ drop_rate: float = 0.0,
656
+ proj_drop_rate: float = 0.0,
657
+ attn_drop_rate: float = 0.0,
658
+ drop_path_rate: float = 0.0,
659
+ norm_layer: Type[nn.Module] = nn.LayerNorm,
660
+ extra_norm_period: int = 0,
661
+ extra_norm_stage: bool = False,
662
+ sequential_attn: bool = False,
663
+ global_pool: str = 'avg',
664
+ weight_init='skip',
665
+ **kwargs: Any
666
+ ) -> None:
667
+ super(SwinTransformerV2Cr, self).__init__()
668
+ img_size = to_2tuple(img_size)
669
+ self.num_classes: int = num_classes
670
+ self.patch_size: int = patch_size
671
+ self.img_size: Tuple[int, int] = img_size
672
+ self.num_features = self.head_hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
673
+ self.feature_info = []
674
+
675
+ self.patch_embed = PatchEmbed(
676
+ img_size=img_size,
677
+ patch_size=patch_size,
678
+ in_chans=in_chans,
679
+ embed_dim=embed_dim,
680
+ norm_layer=norm_layer,
681
+ strict_img_size=strict_img_size,
682
+ )
683
+ grid_size = self.patch_embed.grid_size
684
+ if window_size is None:
685
+ self.window_size = tuple([s // window_ratio for s in grid_size])
686
+ else:
687
+ self.window_size = to_2tuple(window_size)
688
+
689
+ dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
690
+ stages = []
691
+ in_dim = embed_dim
692
+ in_scale = 1
693
+ for stage_idx, (depth, num_heads) in enumerate(zip(depths, num_heads)):
694
+ stages += [SwinTransformerV2CrStage(
695
+ embed_dim=in_dim,
696
+ depth=depth,
697
+ downscale=stage_idx != 0,
698
+ feat_size=(grid_size[0] // in_scale, grid_size[1] // in_scale),
699
+ num_heads=num_heads,
700
+ window_size=self.window_size,
701
+ always_partition=always_partition,
702
+ dynamic_mask=not strict_img_size,
703
+ mlp_ratio=mlp_ratio,
704
+ init_values=init_values,
705
+ proj_drop=proj_drop_rate,
706
+ drop_attn=attn_drop_rate,
707
+ drop_path=dpr[stage_idx],
708
+ extra_norm_period=extra_norm_period,
709
+ extra_norm_stage=extra_norm_stage or (stage_idx + 1) == len(depths), # last stage ends w/ norm
710
+ sequential_attn=sequential_attn,
711
+ norm_layer=norm_layer,
712
+ )]
713
+ if stage_idx != 0:
714
+ in_dim *= 2
715
+ in_scale *= 2
716
+ self.feature_info += [dict(num_chs=in_dim, reduction=4 * in_scale, module=f'stages.{stage_idx}')]
717
+ self.stages = nn.Sequential(*stages)
718
+
719
+ self.head = ClassifierHead(
720
+ self.num_features,
721
+ num_classes,
722
+ pool_type=global_pool,
723
+ drop_rate=drop_rate,
724
+ )
725
+
726
+ # current weight init skips custom init and uses pytorch layer defaults, seems to work well
727
+ # FIXME more experiments needed
728
+ if weight_init != 'skip':
729
+ named_apply(init_weights, self)
730
+
731
+ def set_input_size(
732
+ self,
733
+ img_size: Optional[Tuple[int, int]] = None,
734
+ window_size: Optional[Tuple[int, int]] = None,
735
+ window_ratio: int = 8,
736
+ always_partition: Optional[bool] = None,
737
+ ) -> None:
738
+ """Updates the image resolution, window size and so the pair-wise relative positions.
739
+
740
+ Args:
741
+ img_size (Optional[Tuple[int, int]]): New input resolution, if None current resolution is used
742
+ window_size (Optional[int]): New window size, if None based on new_img_size // window_ratio
743
+ window_ratio (int): divisor for calculating window size from patch grid size
744
+ always_partition: always partition / shift windows even if feat size is < window
745
+ """
746
+ if img_size is not None:
747
+ self.patch_embed.set_input_size(img_size=img_size)
748
+ grid_size = self.patch_embed.grid_size
749
+
750
+ if window_size is None and window_ratio is not None:
751
+ window_size = tuple([s // window_ratio for s in grid_size])
752
+
753
+ for index, stage in enumerate(self.stages):
754
+ stage_scale = 2 ** max(index - 1, 0)
755
+ stage.set_input_size(
756
+ feat_size=(grid_size[0] // stage_scale, grid_size[1] // stage_scale),
757
+ window_size=window_size,
758
+ always_partition=always_partition,
759
+ )
760
+
761
+ @torch.jit.ignore
762
+ def group_matcher(self, coarse=False):
763
+ return dict(
764
+ stem=r'^patch_embed', # stem and embed
765
+ blocks=r'^stages\.(\d+)' if coarse else [
766
+ (r'^stages\.(\d+).downsample', (0,)),
767
+ (r'^stages\.(\d+)\.\w+\.(\d+)', None),
768
+ ]
769
+ )
770
+
771
+ @torch.jit.ignore
772
+ def set_grad_checkpointing(self, enable=True):
773
+ for s in self.stages:
774
+ s.grad_checkpointing = enable
775
+
776
+ @torch.jit.ignore()
777
+ def get_classifier(self) -> nn.Module:
778
+ """Method returns the classification head of the model.
779
+ Returns:
780
+ head (nn.Module): Current classification head
781
+ """
782
+ return self.head.fc
783
+
784
+ def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None) -> None:
785
+ """Method results the classification head
786
+
787
+ Args:
788
+ num_classes (int): Number of classes to be predicted
789
+ global_pool (str): Unused
790
+ """
791
+ self.num_classes = num_classes
792
+ self.head.reset(num_classes, global_pool)
793
+
794
+ def forward_intermediates(
795
+ self,
796
+ x: torch.Tensor,
797
+ indices: Optional[Union[int, List[int]]] = None,
798
+ norm: bool = False,
799
+ stop_early: bool = False,
800
+ output_fmt: str = 'NCHW',
801
+ intermediates_only: bool = False,
802
+ ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
803
+ """ Forward features that returns intermediates.
804
+
805
+ Args:
806
+ x: Input image tensor
807
+ indices: Take last n blocks if int, all if None, select matching indices if sequence
808
+ norm: Apply norm layer to compatible intermediates
809
+ stop_early: Stop iterating over blocks when last desired intermediate hit
810
+ output_fmt: Shape of intermediate feature outputs
811
+ intermediates_only: Only return intermediate features
812
+ Returns:
813
+ List of intermediate features if intermediates_only, else a tuple of (final features, intermediates)
814
+ """
815
+ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
816
+ intermediates = []
817
+ take_indices, max_index = feature_take_indices(len(self.stages), indices)
818
+
819
+ # forward pass
820
+ x = self.patch_embed(x)
821
+
822
+ if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
823
+ stages = self.stages
824
+ else:
825
+ stages = self.stages[:max_index + 1]
826
+ for i, stage in enumerate(stages):
827
+ x = stage(x)
828
+ if i in take_indices:
829
+ intermediates.append(x)
830
+
831
+ if intermediates_only:
832
+ return intermediates
833
+
834
+ return x, intermediates
835
+
836
+ def prune_intermediate_layers(
837
+ self,
838
+ indices: Union[int, List[int]] = 1,
839
+ prune_norm: bool = False,
840
+ prune_head: bool = True,
841
+ ):
842
+ """ Prune layers not required for specified intermediates.
843
+ """
844
+ take_indices, max_index = feature_take_indices(len(self.stages), indices)
845
+ self.stages = self.stages[:max_index + 1] # truncate blocks
846
+ if prune_head:
847
+ self.reset_classifier(0, '')
848
+ return take_indices
849
+
850
+ def forward_features(self, x: torch.Tensor) -> torch.Tensor:
851
+ x = self.patch_embed(x)
852
+ x = self.stages(x)
853
+ return x
854
+
855
+ def forward_head(self, x, pre_logits: bool = False):
856
+ return self.head(x, pre_logits=True) if pre_logits else self.head(x)
857
+
858
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
859
+ x = self.forward_features(x)
860
+ x = self.forward_head(x)
861
+ return x
862
+
863
+
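The header notes easy image-size changes; a hedged sketch of how set_input_size() is meant to be used (illustrative only, not part of the committed file):

    import torch
    model = SwinTransformerV2Cr(img_size=(224, 224), depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24))
    model.set_input_size(img_size=(256, 256), window_ratio=8)  # re-derives windows & relative positions
    model.eval()
    with torch.no_grad():
        out = model(torch.randn(1, 3, 256, 256))
    assert out.shape == (1, 1000)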
864
+ def init_weights(module: nn.Module, name: str = ''):
865
+ # FIXME WIP determining if there's a better weight init
866
+ if isinstance(module, nn.Linear):
867
+ if 'qkv' in name:
868
+ # treat the weights of Q, K, V separately
869
+ val = math.sqrt(6. / float(module.weight.shape[0] // 3 + module.weight.shape[1]))
870
+ nn.init.uniform_(module.weight, -val, val)
871
+ elif 'head' in name:
872
+ nn.init.zeros_(module.weight)
873
+ else:
874
+ nn.init.xavier_uniform_(module.weight)
875
+ if module.bias is not None:
876
+ nn.init.zeros_(module.bias)
877
+ elif hasattr(module, 'init_weights'):
878
+ module.init_weights()
879
+
880
+
881
+ def checkpoint_filter_fn(state_dict, model):
882
+ """ convert patch embedding weight from manual patchify + linear proj to conv"""
883
+ state_dict = state_dict.get('model', state_dict)
884
+ state_dict = state_dict.get('state_dict', state_dict)
885
+ if 'head.fc.weight' in state_dict:
886
+ return state_dict
887
+ out_dict = {}
888
+ for k, v in state_dict.items():
889
+ if 'tau' in k:
890
+ # convert old tau based checkpoints -> logit_scale (inverse)
891
+ v = torch.log(1 / v)
892
+ k = k.replace('tau', 'logit_scale')
893
+ k = k.replace('head.', 'head.fc.')
894
+ out_dict[k] = v
895
+ return out_dict
896
+
897
+
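A small sketch of what the filter does to legacy keys (illustrative only, not part of the committed file): 'tau' tensors become 'logit_scale' via the inverse, and 'head.' keys are remapped to 'head.fc.'.

    import torch
    old_sd = {'stages.0.blocks.0.attn.tau': torch.tensor([2.0]), 'head.weight': torch.zeros(1000, 768)}
    new_sd = checkpoint_filter_fn(old_sd, model=None)  # model arg is unused by this filter
    assert 'stages.0.blocks.0.attn.logit_scale' in new_sd
    assert 'head.fc.weight' in new_sd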
898
+ def _create_swin_transformer_v2_cr(variant, pretrained=False, **kwargs):
899
+ default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 1, 1))))
900
+ out_indices = kwargs.pop('out_indices', default_out_indices)
901
+
902
+ model = build_model_with_cfg(
903
+ SwinTransformerV2Cr, variant, pretrained,
904
+ pretrained_filter_fn=checkpoint_filter_fn,
905
+ feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
906
+ **kwargs
907
+ )
908
+ return model
909
+
910
+
911
+ def _cfg(url='', **kwargs):
912
+ return {
913
+ 'url': url,
914
+ 'num_classes': 1000,
915
+ 'input_size': (3, 224, 224),
916
+ 'pool_size': (7, 7),
917
+ 'crop_pct': 0.9,
918
+ 'interpolation': 'bicubic',
919
+ 'fixed_input_size': True,
920
+ 'mean': IMAGENET_DEFAULT_MEAN,
921
+ 'std': IMAGENET_DEFAULT_STD,
922
+ 'first_conv': 'patch_embed.proj',
923
+ 'classifier': 'head.fc',
924
+ **kwargs,
925
+ }
926
+
927
+
928
+ default_cfgs = generate_default_cfgs({
929
+ 'swinv2_cr_tiny_384.untrained': _cfg(
930
+ url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
931
+ 'swinv2_cr_tiny_224.untrained': _cfg(
932
+ url="", input_size=(3, 224, 224), crop_pct=0.9),
933
+ 'swinv2_cr_tiny_ns_224.sw_in1k': _cfg(
934
+ hf_hub_id='timm/',
935
+ url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_tiny_ns_224-ba8166c6.pth",
936
+ input_size=(3, 224, 224), crop_pct=0.9),
937
+ 'swinv2_cr_small_384.untrained': _cfg(
938
+ url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
939
+ 'swinv2_cr_small_224.sw_in1k': _cfg(
940
+ hf_hub_id='timm/',
941
+ url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_small_224-0813c165.pth",
942
+ input_size=(3, 224, 224), crop_pct=0.9),
943
+ 'swinv2_cr_small_ns_224.sw_in1k': _cfg(
944
+ hf_hub_id='timm/',
945
+ url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_small_ns_224_iv-2ce90f8e.pth",
946
+ input_size=(3, 224, 224), crop_pct=0.9),
947
+ 'swinv2_cr_small_ns_256.untrained': _cfg(
948
+ url="", input_size=(3, 256, 256), crop_pct=1.0, pool_size=(8, 8)),
949
+ 'swinv2_cr_base_384.untrained': _cfg(
950
+ url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
951
+ 'swinv2_cr_base_224.untrained': _cfg(
952
+ url="", input_size=(3, 224, 224), crop_pct=0.9),
953
+ 'swinv2_cr_base_ns_224.untrained': _cfg(
954
+ url="", input_size=(3, 224, 224), crop_pct=0.9),
955
+ 'swinv2_cr_large_384.untrained': _cfg(
956
+ url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
957
+ 'swinv2_cr_large_224.untrained': _cfg(
958
+ url="", input_size=(3, 224, 224), crop_pct=0.9),
959
+ 'swinv2_cr_huge_384.untrained': _cfg(
960
+ url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
961
+ 'swinv2_cr_huge_224.untrained': _cfg(
962
+ url="", input_size=(3, 224, 224), crop_pct=0.9),
963
+ 'swinv2_cr_giant_384.untrained': _cfg(
964
+ url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
965
+ 'swinv2_cr_giant_224.untrained': _cfg(
966
+ url="", input_size=(3, 224, 224), crop_pct=0.9),
967
+ })
968
+
969
+
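Once this module ships with timm, the registered variants below can be created by name; a typical usage sketch (illustrative only, not part of the committed file):

    import timm
    import torch
    model = timm.create_model('swinv2_cr_tiny_ns_224', pretrained=False, num_classes=10)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # torch.Size([1, 10])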
970
+ @register_model
971
+ def swinv2_cr_tiny_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
972
+ """Swin-T V2 CR @ 384x384, trained ImageNet-1k"""
973
+ model_args = dict(
974
+ embed_dim=96,
975
+ depths=(2, 2, 6, 2),
976
+ num_heads=(3, 6, 12, 24),
977
+ )
978
+ return _create_swin_transformer_v2_cr('swinv2_cr_tiny_384', pretrained=pretrained, **dict(model_args, **kwargs))
979
+
980
+
981
+ @register_model
982
+ def swinv2_cr_tiny_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
983
+ """Swin-T V2 CR @ 224x224, trained ImageNet-1k"""
984
+ model_args = dict(
985
+ embed_dim=96,
986
+ depths=(2, 2, 6, 2),
987
+ num_heads=(3, 6, 12, 24),
988
+ )
989
+ return _create_swin_transformer_v2_cr('swinv2_cr_tiny_224', pretrained=pretrained, **dict(model_args, **kwargs))
990
+
991
+
992
+ @register_model
993
+ def swinv2_cr_tiny_ns_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
994
+ """Swin-T V2 CR @ 224x224, trained ImageNet-1k w/ extra stage norms.
995
+ ** Experimental, may make default if results are improved. **
996
+ """
997
+ model_args = dict(
998
+ embed_dim=96,
999
+ depths=(2, 2, 6, 2),
1000
+ num_heads=(3, 6, 12, 24),
1001
+ extra_norm_stage=True,
1002
+ )
1003
+ return _create_swin_transformer_v2_cr('swinv2_cr_tiny_ns_224', pretrained=pretrained, **dict(model_args, **kwargs))
1004
+
1005
+
1006
+ @register_model
1007
+ def swinv2_cr_small_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
1008
+ """Swin-S V2 CR @ 384x384, trained ImageNet-1k"""
1009
+ model_args = dict(
1010
+ embed_dim=96,
1011
+ depths=(2, 2, 18, 2),
1012
+ num_heads=(3, 6, 12, 24),
1013
+ )
1014
+ return _create_swin_transformer_v2_cr('swinv2_cr_small_384', pretrained=pretrained, **dict(model_args, **kwargs))
1015
+
1016
+
1017
+ @register_model
1018
+ def swinv2_cr_small_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
1019
+ """Swin-S V2 CR @ 224x224, trained ImageNet-1k"""
1020
+ model_args = dict(
1021
+ embed_dim=96,
1022
+ depths=(2, 2, 18, 2),
1023
+ num_heads=(3, 6, 12, 24),
1024
+ )
1025
+ return _create_swin_transformer_v2_cr('swinv2_cr_small_224', pretrained=pretrained, **dict(model_args, **kwargs))
1026
+
1027
+
1028
+ @register_model
1029
+ def swinv2_cr_small_ns_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
1030
+ """Swin-S V2 CR @ 224x224, trained ImageNet-1k"""
1031
+ model_args = dict(
1032
+ embed_dim=96,
1033
+ depths=(2, 2, 18, 2),
1034
+ num_heads=(3, 6, 12, 24),
1035
+ extra_norm_stage=True,
1036
+ )
1037
+ return _create_swin_transformer_v2_cr('swinv2_cr_small_ns_224', pretrained=pretrained, **dict(model_args, **kwargs))
1038
+
1039
+
1040
+ @register_model
1041
+ def swinv2_cr_small_ns_256(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
1042
+ """Swin-S V2 CR @ 256x256, trained ImageNet-1k"""
1043
+ model_args = dict(
1044
+ embed_dim=96,
1045
+ depths=(2, 2, 18, 2),
1046
+ num_heads=(3, 6, 12, 24),
1047
+ extra_norm_stage=True,
1048
+ )
1049
+ return _create_swin_transformer_v2_cr('swinv2_cr_small_ns_256', pretrained=pretrained, **dict(model_args, **kwargs))
1050
+
1051
+
1052
+ @register_model
1053
+ def swinv2_cr_base_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
1054
+ """Swin-B V2 CR @ 384x384, trained ImageNet-1k"""
1055
+ model_args = dict(
1056
+ embed_dim=128,
1057
+ depths=(2, 2, 18, 2),
1058
+ num_heads=(4, 8, 16, 32),
1059
+ )
1060
+ return _create_swin_transformer_v2_cr('swinv2_cr_base_384', pretrained=pretrained, **dict(model_args, **kwargs))
1061
+
1062
+
1063
+ @register_model
1064
+ def swinv2_cr_base_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
1065
+ """Swin-B V2 CR @ 224x224, trained ImageNet-1k"""
1066
+ model_args = dict(
1067
+ embed_dim=128,
1068
+ depths=(2, 2, 18, 2),
1069
+ num_heads=(4, 8, 16, 32),
1070
+ )
1071
+ return _create_swin_transformer_v2_cr('swinv2_cr_base_224', pretrained=pretrained, **dict(model_args, **kwargs))
1072
+
1073
+
1074
+ @register_model
1075
+ def swinv2_cr_base_ns_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
1076
+ """Swin-B V2 CR @ 224x224, trained ImageNet-1k"""
1077
+ model_args = dict(
1078
+ embed_dim=128,
1079
+ depths=(2, 2, 18, 2),
1080
+ num_heads=(4, 8, 16, 32),
1081
+ extra_norm_stage=True,
1082
+ )
1083
+ return _create_swin_transformer_v2_cr('swinv2_cr_base_ns_224', pretrained=pretrained, **dict(model_args, **kwargs))
1084
+
1085
+
1086
+ @register_model
1087
+ def swinv2_cr_large_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
1088
+ """Swin-L V2 CR @ 384x384, trained ImageNet-1k"""
1089
+ model_args = dict(
1090
+ embed_dim=192,
1091
+ depths=(2, 2, 18, 2),
1092
+ num_heads=(6, 12, 24, 48),
1093
+ )
1094
+ return _create_swin_transformer_v2_cr('swinv2_cr_large_384', pretrained=pretrained, **dict(model_args, **kwargs))
1095
+
1096
+
1097
+ @register_model
1098
+ def swinv2_cr_large_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
1099
+ """Swin-L V2 CR @ 224x224, trained ImageNet-1k"""
1100
+ model_args = dict(
1101
+ embed_dim=192,
1102
+ depths=(2, 2, 18, 2),
1103
+ num_heads=(6, 12, 24, 48),
1104
+ )
1105
+ return _create_swin_transformer_v2_cr('swinv2_cr_large_224', pretrained=pretrained, **dict(model_args, **kwargs))
1106
+
1107
+
1108
+ @register_model
1109
+ def swinv2_cr_huge_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
1110
+ """Swin-H V2 CR @ 384x384, trained ImageNet-1k"""
1111
+ model_args = dict(
1112
+ embed_dim=352,
1113
+ depths=(2, 2, 18, 2),
1114
+ num_heads=(11, 22, 44, 88), # head count not certain for Huge, 384 & 224 trying diff values
1115
+ extra_norm_period=6,
1116
+ )
1117
+ return _create_swin_transformer_v2_cr('swinv2_cr_huge_384', pretrained=pretrained, **dict(model_args, **kwargs))
1118
+
1119
+
1120
+ @register_model
1121
+ def swinv2_cr_huge_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
1122
+ """Swin-H V2 CR @ 224x224, trained ImageNet-1k"""
1123
+ model_args = dict(
1124
+ embed_dim=352,
1125
+ depths=(2, 2, 18, 2),
1126
+ num_heads=(8, 16, 32, 64), # head count not certain for Huge, 384 & 224 trying diff values
1127
+ extra_norm_period=6,
1128
+ )
1129
+ return _create_swin_transformer_v2_cr('swinv2_cr_huge_224', pretrained=pretrained, **dict(model_args, **kwargs))
1130
+
1131
+
1132
+ @register_model
1133
+ def swinv2_cr_giant_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
1134
+ """Swin-G V2 CR @ 384x384, trained ImageNet-1k"""
1135
+ model_args = dict(
1136
+ embed_dim=512,
1137
+ depths=(2, 2, 42, 2),
1138
+ num_heads=(16, 32, 64, 128),
1139
+ extra_norm_period=6,
1140
+ )
1141
+ return _create_swin_transformer_v2_cr('swinv2_cr_giant_384', pretrained=pretrained, **dict(model_args, **kwargs))
1142
+
1143
+
1144
+ @register_model
1145
+ def swinv2_cr_giant_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr:
1146
+ """Swin-G V2 CR @ 224x224, trained ImageNet-1k"""
1147
+ model_args = dict(
1148
+ embed_dim=512,
1149
+ depths=(2, 2, 42, 2),
1150
+ num_heads=(16, 32, 64, 128),
1151
+ extra_norm_period=6,
1152
+ )
1153
+ return _create_swin_transformer_v2_cr('swinv2_cr_giant_224', pretrained=pretrained, **dict(model_args, **kwargs))
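For feature extraction, forward_intermediates() returns NCHW stage outputs and the feature_cfg above enables timm's features_only wrapper; a hedged usage sketch (illustrative only, not part of the committed file):

    import timm
    import torch
    model = timm.create_model('swinv2_cr_tiny_224', features_only=True, pretrained=False)
    feats = model(torch.randn(1, 3, 224, 224))
    for f in feats:
        print(f.shape)  # strides 4, 8, 16, 32 with 96, 192, 384, 768 channels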
pytorch-image-models/timm/models/tiny_vit.py ADDED
@@ -0,0 +1,715 @@
1
+ """ TinyViT
2
+
3
+ Paper: `TinyViT: Fast Pretraining Distillation for Small Vision Transformers`
4
+ - https://arxiv.org/abs/2207.10666
5
+
6
+ Adapted from official impl at https://github.com/microsoft/Cream/tree/main/TinyViT
7
+ """
8
+
9
+ __all__ = ['TinyVit']
10
+
11
+ import itertools
12
+ from functools import partial
13
+ from typing import Dict, Optional
14
+
15
+ import torch
16
+ import torch.nn as nn
17
+ import torch.nn.functional as F
18
+
19
+ from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
20
+ from timm.layers import LayerNorm2d, NormMlpClassifierHead, DropPath,\
21
+ trunc_normal_, resize_rel_pos_bias_table_levit, use_fused_attn
22
+ from ._builder import build_model_with_cfg
23
+ from ._features_fx import register_notrace_module
24
+ from ._manipulate import checkpoint_seq
25
+ from ._registry import register_model, generate_default_cfgs
26
+
27
+
28
+ class ConvNorm(torch.nn.Sequential):
29
+ def __init__(self, in_chs, out_chs, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1):
30
+ super().__init__()
31
+ self.conv = nn.Conv2d(in_chs, out_chs, ks, stride, pad, dilation, groups, bias=False)
32
+ self.bn = nn.BatchNorm2d(out_chs)
33
+ torch.nn.init.constant_(self.bn.weight, bn_weight_init)
34
+ torch.nn.init.constant_(self.bn.bias, 0)
35
+
36
+ @torch.no_grad()
37
+ def fuse(self):
38
+ c, bn = self.conv, self.bn
39
+ w = bn.weight / (bn.running_var + bn.eps) ** 0.5
40
+ w = c.weight * w[:, None, None, None]
41
+ b = bn.bias - bn.running_mean * bn.weight / \
42
+ (bn.running_var + bn.eps) ** 0.5
43
+ m = torch.nn.Conv2d(
44
+ w.size(1) * self.conv.groups, w.size(0), w.shape[2:],
45
+ stride=self.conv.stride, padding=self.conv.padding, dilation=self.conv.dilation, groups=self.conv.groups)
46
+ m.weight.data.copy_(w)
47
+ m.bias.data.copy_(b)
48
+ return m
49
+
50
+
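ConvNorm.fuse() folds the BatchNorm into the convolution for inference; a minimal equivalence check (illustrative only, not part of the committed file):

    import torch
    cn = ConvNorm(8, 16, ks=3, pad=1).eval()  # eval mode so BN uses running stats
    fused = cn.fuse()
    x = torch.randn(1, 8, 14, 14)
    assert torch.allclose(cn(x), fused(x), atol=1e-5)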
51
+ class PatchEmbed(nn.Module):
52
+ def __init__(self, in_chs, out_chs, act_layer):
53
+ super().__init__()
54
+ self.stride = 4
55
+ self.conv1 = ConvNorm(in_chs, out_chs // 2, 3, 2, 1)
56
+ self.act = act_layer()
57
+ self.conv2 = ConvNorm(out_chs // 2, out_chs, 3, 2, 1)
58
+
59
+ def forward(self, x):
60
+ x = self.conv1(x)
61
+ x = self.act(x)
62
+ x = self.conv2(x)
63
+ return x
64
+
65
+
66
+ class MBConv(nn.Module):
67
+ def __init__(self, in_chs, out_chs, expand_ratio, act_layer, drop_path):
68
+ super().__init__()
69
+ mid_chs = int(in_chs * expand_ratio)
70
+ self.conv1 = ConvNorm(in_chs, mid_chs, ks=1)
71
+ self.act1 = act_layer()
72
+ self.conv2 = ConvNorm(mid_chs, mid_chs, ks=3, stride=1, pad=1, groups=mid_chs)
73
+ self.act2 = act_layer()
74
+ self.conv3 = ConvNorm(mid_chs, out_chs, ks=1, bn_weight_init=0.0)
75
+ self.act3 = act_layer()
76
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
77
+
78
+ def forward(self, x):
79
+ shortcut = x
80
+ x = self.conv1(x)
81
+ x = self.act1(x)
82
+ x = self.conv2(x)
83
+ x = self.act2(x)
84
+ x = self.conv3(x)
85
+ x = self.drop_path(x)
86
+ x += shortcut
87
+ x = self.act3(x)
88
+ return x
89
+
90
+
91
+ class PatchMerging(nn.Module):
92
+ def __init__(self, dim, out_dim, act_layer):
93
+ super().__init__()
94
+ self.conv1 = ConvNorm(dim, out_dim, 1, 1, 0)
95
+ self.act1 = act_layer()
96
+ self.conv2 = ConvNorm(out_dim, out_dim, 3, 2, 1, groups=out_dim)
97
+ self.act2 = act_layer()
98
+ self.conv3 = ConvNorm(out_dim, out_dim, 1, 1, 0)
99
+
100
+ def forward(self, x):
101
+ x = self.conv1(x)
102
+ x = self.act1(x)
103
+ x = self.conv2(x)
104
+ x = self.act2(x)
105
+ x = self.conv3(x)
106
+ return x
107
+
108
+
109
+ class ConvLayer(nn.Module):
110
+ def __init__(
111
+ self,
112
+ dim,
113
+ depth,
114
+ act_layer,
115
+ drop_path=0.,
116
+ conv_expand_ratio=4.,
117
+ ):
118
+ super().__init__()
119
+ self.dim = dim
120
+ self.depth = depth
121
+ self.blocks = nn.Sequential(*[
122
+ MBConv(
123
+ dim, dim, conv_expand_ratio, act_layer,
124
+ drop_path[i] if isinstance(drop_path, list) else drop_path,
125
+ )
126
+ for i in range(depth)
127
+ ])
128
+
129
+ def forward(self, x):
130
+ x = self.blocks(x)
131
+ return x
132
+
133
+
134
+ class NormMlp(nn.Module):
135
+ def __init__(
136
+ self,
137
+ in_features,
138
+ hidden_features=None,
139
+ out_features=None,
140
+ norm_layer=nn.LayerNorm,
141
+ act_layer=nn.GELU,
142
+ drop=0.,
143
+ ):
144
+ super().__init__()
145
+ out_features = out_features or in_features
146
+ hidden_features = hidden_features or in_features
147
+ self.norm = norm_layer(in_features)
148
+ self.fc1 = nn.Linear(in_features, hidden_features)
149
+ self.act = act_layer()
150
+ self.drop1 = nn.Dropout(drop)
151
+ self.fc2 = nn.Linear(hidden_features, out_features)
152
+ self.drop2 = nn.Dropout(drop)
153
+
154
+ def forward(self, x):
155
+ x = self.norm(x)
156
+ x = self.fc1(x)
157
+ x = self.act(x)
158
+ x = self.drop1(x)
159
+ x = self.fc2(x)
160
+ x = self.drop2(x)
161
+ return x
162
+
163
+
164
+ class Attention(torch.nn.Module):
165
+ fused_attn: torch.jit.Final[bool]
166
+ attention_bias_cache: Dict[str, torch.Tensor]
167
+
168
+ def __init__(
169
+ self,
170
+ dim,
171
+ key_dim,
172
+ num_heads=8,
173
+ attn_ratio=4,
174
+ resolution=(14, 14),
175
+ ):
176
+ super().__init__()
177
+ assert isinstance(resolution, tuple) and len(resolution) == 2
178
+ self.num_heads = num_heads
179
+ self.scale = key_dim ** -0.5
180
+ self.key_dim = key_dim
181
+ self.val_dim = int(attn_ratio * key_dim)
182
+ self.out_dim = self.val_dim * num_heads
183
+ self.attn_ratio = attn_ratio
184
+ self.resolution = resolution
185
+ self.fused_attn = use_fused_attn()
186
+
187
+ self.norm = nn.LayerNorm(dim)
188
+ self.qkv = nn.Linear(dim, num_heads * (self.val_dim + 2 * key_dim))
189
+ self.proj = nn.Linear(self.out_dim, dim)
190
+
191
+ points = list(itertools.product(range(resolution[0]), range(resolution[1])))
192
+ N = len(points)
193
+ attention_offsets = {}
194
+ idxs = []
195
+ for p1 in points:
196
+ for p2 in points:
197
+ offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
198
+ if offset not in attention_offsets:
199
+ attention_offsets[offset] = len(attention_offsets)
200
+ idxs.append(attention_offsets[offset])
201
+ self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets)))
202
+ self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N, N), persistent=False)
203
+ self.attention_bias_cache = {}
204
+
205
+ @torch.no_grad()
206
+ def train(self, mode=True):
207
+ super().train(mode)
208
+ if mode and self.attention_bias_cache:
209
+ self.attention_bias_cache = {} # clear ab cache
210
+
211
+ def get_attention_biases(self, device: torch.device) -> torch.Tensor:
212
+ if torch.jit.is_tracing() or self.training:
213
+ return self.attention_biases[:, self.attention_bias_idxs]
214
+ else:
215
+ device_key = str(device)
216
+ if device_key not in self.attention_bias_cache:
217
+ self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
218
+ return self.attention_bias_cache[device_key]
219
+
220
+ def forward(self, x):
221
+ attn_bias = self.get_attention_biases(x.device)
222
+ B, N, _ = x.shape
223
+ # Normalization
224
+ x = self.norm(x)
225
+ qkv = self.qkv(x)
226
+ # (B, N, num_heads, d)
227
+ q, k, v = qkv.view(B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.val_dim], dim=3)
228
+ # (B, num_heads, N, d)
229
+ q = q.permute(0, 2, 1, 3)
230
+ k = k.permute(0, 2, 1, 3)
231
+ v = v.permute(0, 2, 1, 3)
232
+
233
+ if self.fused_attn:
234
+ x = F.scaled_dot_product_attention(q, k, v, attn_mask=attn_bias)
235
+ else:
236
+ q = q * self.scale
237
+ attn = q @ k.transpose(-2, -1)
238
+ attn = attn + attn_bias
239
+ attn = attn.softmax(dim=-1)
240
+ x = attn @ v
241
+ x = x.transpose(1, 2).reshape(B, N, self.out_dim)
242
+ x = self.proj(x)
243
+ return x
244
+
245
+
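The attention module operates on flattened windows and adds a learned per-head relative-position bias; a shape sketch (illustrative only, not part of the committed file):

    import torch
    attn = Attention(dim=64, key_dim=16, num_heads=4, attn_ratio=1, resolution=(7, 7)).eval()
    x = torch.randn(2, 7 * 7, 64)  # (num_windows * B, tokens, C)
    assert attn(x).shape == x.shape
    assert attn.attention_bias_idxs.shape == (49, 49)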
246
+ class TinyVitBlock(nn.Module):
247
+ """ TinyViT Block.
248
+
249
+ Args:
250
+ dim (int): Number of input channels.
251
+ num_heads (int): Number of attention heads.
252
+ window_size (int): Window size.
253
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
254
+ drop (float, optional): Dropout rate. Default: 0.0
255
+ drop_path (float, optional): Stochastic depth rate. Default: 0.0
256
+ local_conv_size (int): the kernel size of the convolution between
257
+ Attention and MLP. Default: 3
258
+ act_layer: the activation function. Default: nn.GELU
259
+ """
260
+
261
+ def __init__(
262
+ self,
263
+ dim,
264
+ num_heads,
265
+ window_size=7,
266
+ mlp_ratio=4.,
267
+ drop=0.,
268
+ drop_path=0.,
269
+ local_conv_size=3,
270
+ act_layer=nn.GELU
271
+ ):
272
+ super().__init__()
273
+ self.dim = dim
274
+ self.num_heads = num_heads
275
+ assert window_size > 0, 'window_size must be greater than 0'
276
+ self.window_size = window_size
277
+ self.mlp_ratio = mlp_ratio
278
+
279
+ assert dim % num_heads == 0, 'dim must be divisible by num_heads'
280
+ head_dim = dim // num_heads
281
+
282
+ window_resolution = (window_size, window_size)
283
+ self.attn = Attention(dim, head_dim, num_heads, attn_ratio=1, resolution=window_resolution)
284
+ self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
285
+
286
+
287
+ self.mlp = NormMlp(
288
+ in_features=dim,
289
+ hidden_features=int(dim * mlp_ratio),
290
+ act_layer=act_layer,
291
+ drop=drop,
292
+ )
293
+ self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
294
+
295
+ pad = local_conv_size // 2
296
+ self.local_conv = ConvNorm(dim, dim, ks=local_conv_size, stride=1, pad=pad, groups=dim)
297
+
298
+ def forward(self, x):
299
+ B, H, W, C = x.shape
300
+ L = H * W
301
+
302
+ shortcut = x
303
+ if H == self.window_size and W == self.window_size:
304
+ x = x.reshape(B, L, C)
305
+ x = self.attn(x)
306
+ x = x.view(B, H, W, C)
307
+ else:
308
+ pad_b = (self.window_size - H % self.window_size) % self.window_size
309
+ pad_r = (self.window_size - W % self.window_size) % self.window_size
310
+ padding = pad_b > 0 or pad_r > 0
311
+ if padding:
312
+ x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b))
313
+
314
+ # window partition
315
+ pH, pW = H + pad_b, W + pad_r
316
+ nH = pH // self.window_size
317
+ nW = pW // self.window_size
318
+ x = x.view(B, nH, self.window_size, nW, self.window_size, C).transpose(2, 3).reshape(
319
+ B * nH * nW, self.window_size * self.window_size, C
320
+ )
321
+
322
+ x = self.attn(x)
323
+
324
+ # window reverse
325
+ x = x.view(B, nH, nW, self.window_size, self.window_size, C).transpose(2, 3).reshape(B, pH, pW, C)
326
+
327
+ if padding:
328
+ x = x[:, :H, :W].contiguous()
329
+ x = shortcut + self.drop_path1(x)
330
+
331
+ x = x.permute(0, 3, 1, 2)
332
+ x = self.local_conv(x)
333
+ x = x.reshape(B, C, L).transpose(1, 2)
334
+
335
+ x = x + self.drop_path2(self.mlp(x))
336
+ return x.view(B, H, W, C)
337
+
338
+ def extra_repr(self) -> str:
339
+ return f"dim={self.dim}, num_heads={self.num_heads}, " \
340
+ f"window_size={self.window_size}, mlp_ratio={self.mlp_ratio}"
341
+
342
+
343
+ register_notrace_module(TinyVitBlock)
344
+
345
+
346
+ class TinyVitStage(nn.Module):
347
+ """ A basic TinyViT layer for one stage.
348
+
349
+ Args:
350
+ dim (int): Number of input channels.
351
+ out_dim: the output dimension of the layer
352
+ depth (int): Number of blocks.
353
+ num_heads (int): Number of attention heads.
354
+ window_size (int): Local window size.
355
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
356
+ drop (float, optional): Dropout rate. Default: 0.0
357
+ drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
358
+ downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
359
+ local_conv_size: the kernel size of the depthwise convolution between attention and MLP. Default: 3
360
+ act_layer: the activation function. Default: nn.GELU
361
+ """
362
+
363
+ def __init__(
364
+ self,
365
+ dim,
366
+ out_dim,
367
+ depth,
368
+ num_heads,
369
+ window_size,
370
+ mlp_ratio=4.,
371
+ drop=0.,
372
+ drop_path=0.,
373
+ downsample=None,
374
+ local_conv_size=3,
375
+ act_layer=nn.GELU,
376
+ ):
377
+
378
+ super().__init__()
379
+ self.depth = depth
380
+ self.out_dim = out_dim
381
+
382
+ # patch merging layer
383
+ if downsample is not None:
384
+ self.downsample = downsample(
385
+ dim=dim,
386
+ out_dim=out_dim,
387
+ act_layer=act_layer,
388
+ )
389
+ else:
390
+ self.downsample = nn.Identity()
391
+ assert dim == out_dim
392
+
393
+ # build blocks
394
+ self.blocks = nn.Sequential(*[
395
+ TinyVitBlock(
396
+ dim=out_dim,
397
+ num_heads=num_heads,
398
+ window_size=window_size,
399
+ mlp_ratio=mlp_ratio,
400
+ drop=drop,
401
+ drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
402
+ local_conv_size=local_conv_size,
403
+ act_layer=act_layer,
404
+ )
405
+ for i in range(depth)])
406
+
407
+ def forward(self, x):
408
+ x = self.downsample(x)
409
+ x = x.permute(0, 2, 3, 1) # BCHW -> BHWC
410
+ x = self.blocks(x)
411
+ x = x.permute(0, 3, 1, 2) # BHWC -> BCHW
412
+ return x
413
+
414
+ def extra_repr(self) -> str:
415
+ return f"dim={self.out_dim}, depth={self.depth}"
416
+
417
+
418
+ class TinyVit(nn.Module):
419
+ def __init__(
420
+ self,
421
+ in_chans=3,
422
+ num_classes=1000,
423
+ global_pool='avg',
424
+ embed_dims=(96, 192, 384, 768),
425
+ depths=(2, 2, 6, 2),
426
+ num_heads=(3, 6, 12, 24),
427
+ window_sizes=(7, 7, 14, 7),
428
+ mlp_ratio=4.,
429
+ drop_rate=0.,
430
+ drop_path_rate=0.1,
431
+ use_checkpoint=False,
432
+ mbconv_expand_ratio=4.0,
433
+ local_conv_size=3,
434
+ act_layer=nn.GELU,
435
+ ):
436
+ super().__init__()
437
+
438
+ self.num_classes = num_classes
439
+ self.depths = depths
440
+ self.num_stages = len(depths)
441
+ self.mlp_ratio = mlp_ratio
442
+ self.grad_checkpointing = use_checkpoint
443
+
444
+ self.patch_embed = PatchEmbed(
445
+ in_chs=in_chans,
446
+ out_chs=embed_dims[0],
447
+ act_layer=act_layer,
448
+ )
449
+
450
+ # stochastic depth rate rule
451
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
452
+
453
+ # build stages
454
+ self.stages = nn.Sequential()
455
+ stride = self.patch_embed.stride
456
+ prev_dim = embed_dims[0]
457
+ self.feature_info = []
458
+ for stage_idx in range(self.num_stages):
459
+ if stage_idx == 0:
460
+ stage = ConvLayer(
461
+ dim=prev_dim,
462
+ depth=depths[stage_idx],
463
+ act_layer=act_layer,
464
+ drop_path=dpr[:depths[stage_idx]],
465
+ conv_expand_ratio=mbconv_expand_ratio,
466
+ )
467
+ else:
468
+ out_dim = embed_dims[stage_idx]
469
+ drop_path_rate = dpr[sum(depths[:stage_idx]):sum(depths[:stage_idx + 1])]
470
+ stage = TinyVitStage(
471
+ dim=embed_dims[stage_idx - 1],
472
+ out_dim=out_dim,
473
+ depth=depths[stage_idx],
474
+ num_heads=num_heads[stage_idx],
475
+ window_size=window_sizes[stage_idx],
476
+ mlp_ratio=self.mlp_ratio,
477
+ drop=drop_rate,
478
+ local_conv_size=local_conv_size,
479
+ drop_path=drop_path_rate,
480
+ downsample=PatchMerging,
481
+ act_layer=act_layer,
482
+ )
483
+ prev_dim = out_dim
484
+ stride *= 2
485
+ self.stages.append(stage)
486
+ self.feature_info += [dict(num_chs=prev_dim, reduction=stride, module=f'stages.{stage_idx}')]
487
+
488
+ # Classifier head
489
+ self.num_features = self.head_hidden_size = embed_dims[-1]
490
+
491
+ norm_layer_cf = partial(LayerNorm2d, eps=1e-5)
492
+ self.head = NormMlpClassifierHead(
493
+ self.num_features,
494
+ num_classes,
495
+ pool_type=global_pool,
496
+ norm_layer=norm_layer_cf,
497
+ )
498
+
499
+ # init weights
500
+ self.apply(self._init_weights)
501
+
502
+ def _init_weights(self, m):
503
+ if isinstance(m, nn.Linear):
504
+ trunc_normal_(m.weight, std=.02)
505
+ if isinstance(m, nn.Linear) and m.bias is not None:
506
+ nn.init.constant_(m.bias, 0)
507
+
508
+ @torch.jit.ignore
509
+ def no_weight_decay_keywords(self):
510
+ return {'attention_biases'}
511
+
512
+ @torch.jit.ignore
513
+ def no_weight_decay(self):
514
+ return {x for x in self.state_dict().keys() if 'attention_biases' in x}
515
+
516
+ @torch.jit.ignore
517
+ def group_matcher(self, coarse=False):
518
+ matcher = dict(
519
+ stem=r'^patch_embed',
520
+ blocks=r'^stages\.(\d+)' if coarse else [
521
+ (r'^stages\.(\d+).downsample', (0,)),
522
+ (r'^stages\.(\d+)\.\w+\.(\d+)', None),
523
+ ]
524
+ )
525
+ return matcher
526
+
527
+ @torch.jit.ignore
528
+ def set_grad_checkpointing(self, enable=True):
529
+ self.grad_checkpointing = enable
530
+
531
+ @torch.jit.ignore
532
+ def get_classifier(self) -> nn.Module:
533
+ return self.head.fc
534
+
535
+ def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
536
+ self.num_classes = num_classes
537
+ self.head.reset(num_classes, pool_type=global_pool)
538
+
539
+ def forward_features(self, x):
540
+ x = self.patch_embed(x)
541
+ if self.grad_checkpointing and not torch.jit.is_scripting():
542
+ x = checkpoint_seq(self.stages, x)
543
+ else:
544
+ x = self.stages(x)
545
+ return x
546
+
547
+ def forward_head(self, x, pre_logits: bool = False):
548
+ x = self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
549
+ return x
550
+
551
+ def forward(self, x):
552
+ x = self.forward_features(x)
553
+ x = self.forward_head(x)
554
+ return x
555
+
556
+
557
+ def checkpoint_filter_fn(state_dict, model):
558
+ if 'model' in state_dict.keys():
559
+ state_dict = state_dict['model']
560
+ target_sd = model.state_dict()
561
+ out_dict = {}
562
+ for k, v in state_dict.items():
563
+ if k.endswith('attention_bias_idxs'):
564
+ continue
565
+ if 'attention_biases' in k:
566
+ # TODO: consider moving this resize into the model to support dynamic input resolution (high risk)
567
+ v = resize_rel_pos_bias_table_levit(v.T, target_sd[k].shape[::-1]).T
568
+ out_dict[k] = v
569
+ return out_dict
570
+
571
+
572
+ def _cfg(url='', **kwargs):
573
+ return {
574
+ 'url': url,
575
+ 'num_classes': 1000,
576
+ 'mean': IMAGENET_DEFAULT_MEAN,
577
+ 'std': IMAGENET_DEFAULT_STD,
578
+ 'first_conv': 'patch_embed.conv1.conv',
579
+ 'classifier': 'head.fc',
580
+ 'pool_size': (7, 7),
581
+ 'input_size': (3, 224, 224),
582
+ 'crop_pct': 0.95,
583
+ **kwargs,
584
+ }
585
+
586
+
587
+ default_cfgs = generate_default_cfgs({
588
+ 'tiny_vit_5m_224.dist_in22k': _cfg(
589
+ hf_hub_id='timm/',
590
+ # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_5m_22k_distill.pth',
591
+ num_classes=21841
592
+ ),
593
+ 'tiny_vit_5m_224.dist_in22k_ft_in1k': _cfg(
594
+ hf_hub_id='timm/',
595
+ # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_5m_22kto1k_distill.pth'
596
+ ),
597
+ 'tiny_vit_5m_224.in1k': _cfg(
598
+ hf_hub_id='timm/',
599
+ # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_5m_1k.pth'
600
+ ),
601
+ 'tiny_vit_11m_224.dist_in22k': _cfg(
602
+ hf_hub_id='timm/',
603
+ # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_11m_22k_distill.pth',
604
+ num_classes=21841
605
+ ),
606
+ 'tiny_vit_11m_224.dist_in22k_ft_in1k': _cfg(
607
+ hf_hub_id='timm/',
608
+ # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_11m_22kto1k_distill.pth'
609
+ ),
610
+ 'tiny_vit_11m_224.in1k': _cfg(
611
+ hf_hub_id='timm/',
612
+ # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_11m_1k.pth'
613
+ ),
614
+ 'tiny_vit_21m_224.dist_in22k': _cfg(
615
+ hf_hub_id='timm/',
616
+ # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22k_distill.pth',
617
+ num_classes=21841
618
+ ),
619
+ 'tiny_vit_21m_224.dist_in22k_ft_in1k': _cfg(
620
+ hf_hub_id='timm/',
621
+ # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22kto1k_distill.pth'
622
+ ),
623
+ 'tiny_vit_21m_224.in1k': _cfg(
624
+ hf_hub_id='timm/',
625
+ #url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_1k.pth'
626
+ ),
627
+ 'tiny_vit_21m_384.dist_in22k_ft_in1k': _cfg(
628
+ hf_hub_id='timm/',
629
+ # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22kto1k_384_distill.pth',
630
+ input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0,
631
+ ),
632
+ 'tiny_vit_21m_512.dist_in22k_ft_in1k': _cfg(
633
+ hf_hub_id='timm/',
634
+ # url='https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22kto1k_512_distill.pth',
635
+ input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash',
636
+ ),
637
+ })
638
+
639
+
640
+ def _create_tiny_vit(variant, pretrained=False, **kwargs):
641
+ out_indices = kwargs.pop('out_indices', (0, 1, 2, 3))
642
+ model = build_model_with_cfg(
643
+ TinyVit,
644
+ variant,
645
+ pretrained,
646
+ feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
647
+ pretrained_filter_fn=checkpoint_filter_fn,
648
+ **kwargs
649
+ )
650
+ return model
651
+
652
+
653
+ @register_model
654
+ def tiny_vit_5m_224(pretrained=False, **kwargs):
655
+ model_kwargs = dict(
656
+ embed_dims=[64, 128, 160, 320],
657
+ depths=[2, 2, 6, 2],
658
+ num_heads=[2, 4, 5, 10],
659
+ window_sizes=[7, 7, 14, 7],
660
+ drop_path_rate=0.0,
661
+ )
662
+ model_kwargs.update(kwargs)
663
+ return _create_tiny_vit('tiny_vit_5m_224', pretrained, **model_kwargs)
664
+
665
+
666
+ @register_model
667
+ def tiny_vit_11m_224(pretrained=False, **kwargs):
668
+ model_kwargs = dict(
669
+ embed_dims=[64, 128, 256, 448],
670
+ depths=[2, 2, 6, 2],
671
+ num_heads=[2, 4, 8, 14],
672
+ window_sizes=[7, 7, 14, 7],
673
+ drop_path_rate=0.1,
674
+ )
675
+ model_kwargs.update(kwargs)
676
+ return _create_tiny_vit('tiny_vit_11m_224', pretrained, **model_kwargs)
677
+
678
+
679
+ @register_model
680
+ def tiny_vit_21m_224(pretrained=False, **kwargs):
681
+ model_kwargs = dict(
682
+ embed_dims=[96, 192, 384, 576],
683
+ depths=[2, 2, 6, 2],
684
+ num_heads=[3, 6, 12, 18],
685
+ window_sizes=[7, 7, 14, 7],
686
+ drop_path_rate=0.2,
687
+ )
688
+ model_kwargs.update(kwargs)
689
+ return _create_tiny_vit('tiny_vit_21m_224', pretrained, **model_kwargs)
690
+
691
+
692
+ @register_model
693
+ def tiny_vit_21m_384(pretrained=False, **kwargs):
694
+ model_kwargs = dict(
695
+ embed_dims=[96, 192, 384, 576],
696
+ depths=[2, 2, 6, 2],
697
+ num_heads=[3, 6, 12, 18],
698
+ window_sizes=[12, 12, 24, 12],
699
+ drop_path_rate=0.1,
700
+ )
701
+ model_kwargs.update(kwargs)
702
+ return _create_tiny_vit('tiny_vit_21m_384', pretrained, **model_kwargs)
703
+
704
+
705
+ @register_model
706
+ def tiny_vit_21m_512(pretrained=False, **kwargs):
707
+ model_kwargs = dict(
708
+ embed_dims=[96, 192, 384, 576],
709
+ depths=[2, 2, 6, 2],
710
+ num_heads=[3, 6, 12, 18],
711
+ window_sizes=[16, 16, 32, 16],
712
+ drop_path_rate=0.1,
713
+ )
714
+ model_kwargs.update(kwargs)
715
+ return _create_tiny_vit('tiny_vit_21m_512', pretrained, **model_kwargs)
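
The registrations above complete the TinyViT additions. As a quick sanity check, a minimal usage sketch follows (not part of the diff; it assumes the timm package in this repo is importable and that `timm.create_model` resolves the new entrypoints, and leaves `pretrained=False` to avoid a hub download):

import torch
import timm

# classification forward with a fresh (untrained) head
model = timm.create_model('tiny_vit_5m_224', pretrained=False, num_classes=10).eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # expected: torch.Size([1, 10])

# features_only uses the feature_info entries built in TinyVit.__init__
feat_model = timm.create_model('tiny_vit_5m_224', pretrained=False, features_only=True).eval()
with torch.no_grad():
    feats = feat_model(torch.randn(1, 3, 224, 224))
print([f.shape for f in feats])  # one NCHW map per stage (reductions typically 4/8/16/32)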
pytorch-image-models/timm/models/tnt.py ADDED
@@ -0,0 +1,374 @@
1
+ """ Transformer in Transformer (TNT) in PyTorch
2
+
3
+ A PyTorch implement of TNT as described in
4
+ 'Transformer in Transformer' - https://arxiv.org/abs/2103.00112
5
+
6
+ The official MindSpore implementation is available at
7
+ https://gitee.com/mindspore/mindspore/tree/master/model_zoo/research/cv/TNT
8
+ """
9
+ import math
10
+ from typing import Optional
11
+
12
+ import torch
13
+ import torch.nn as nn
14
+ from torch.utils.checkpoint import checkpoint
15
+
16
+ from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
17
+ from timm.layers import Mlp, DropPath, trunc_normal_, _assert, to_2tuple
18
+ from ._builder import build_model_with_cfg
19
+ from ._registry import register_model
20
+ from .vision_transformer import resize_pos_embed
21
+
22
+ __all__ = ['TNT'] # model_registry will add each entrypoint fn to this
23
+
24
+
25
+ def _cfg(url='', **kwargs):
26
+ return {
27
+ 'url': url,
28
+ 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
29
+ 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
30
+ 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
31
+ 'first_conv': 'pixel_embed.proj', 'classifier': 'head',
32
+ **kwargs
33
+ }
34
+
35
+
36
+ default_cfgs = {
37
+ 'tnt_s_patch16_224': _cfg(
38
+ url='https://github.com/contrastive/pytorch-image-models/releases/download/TNT/tnt_s_patch16_224.pth.tar',
39
+ mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
40
+ ),
41
+ 'tnt_b_patch16_224': _cfg(
42
+ mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
43
+ ),
44
+ }
45
+
46
+
47
+ class Attention(nn.Module):
48
+ """ Multi-Head Attention
49
+ """
50
+ def __init__(self, dim, hidden_dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
51
+ super().__init__()
52
+ self.hidden_dim = hidden_dim
53
+ self.num_heads = num_heads
54
+ head_dim = hidden_dim // num_heads
55
+ self.head_dim = head_dim
56
+ self.scale = head_dim ** -0.5
57
+
58
+ self.qk = nn.Linear(dim, hidden_dim * 2, bias=qkv_bias)
59
+ self.v = nn.Linear(dim, dim, bias=qkv_bias)
60
+ self.attn_drop = nn.Dropout(attn_drop, inplace=True)
61
+ self.proj = nn.Linear(dim, dim)
62
+ self.proj_drop = nn.Dropout(proj_drop, inplace=True)
63
+
64
+ def forward(self, x):
65
+ B, N, C = x.shape
66
+ qk = self.qk(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
67
+ q, k = qk.unbind(0) # make torchscript happy (cannot use tensor as tuple)
68
+ v = self.v(x).reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
69
+
70
+ attn = (q @ k.transpose(-2, -1)) * self.scale
71
+ attn = attn.softmax(dim=-1)
72
+ attn = self.attn_drop(attn)
73
+
74
+ x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
75
+ x = self.proj(x)
76
+ x = self.proj_drop(x)
77
+ return x
78
+
79
+
80
+ class Block(nn.Module):
81
+ """ TNT Block
82
+ """
83
+ def __init__(
84
+ self,
85
+ dim,
86
+ dim_out,
87
+ num_pixel,
88
+ num_heads_in=4,
89
+ num_heads_out=12,
90
+ mlp_ratio=4.,
91
+ qkv_bias=False,
92
+ proj_drop=0.,
93
+ attn_drop=0.,
94
+ drop_path=0.,
95
+ act_layer=nn.GELU,
96
+ norm_layer=nn.LayerNorm,
97
+ ):
98
+ super().__init__()
99
+ # Inner transformer
100
+ self.norm_in = norm_layer(dim)
101
+ self.attn_in = Attention(
102
+ dim,
103
+ dim,
104
+ num_heads=num_heads_in,
105
+ qkv_bias=qkv_bias,
106
+ attn_drop=attn_drop,
107
+ proj_drop=proj_drop,
108
+ )
109
+
110
+ self.norm_mlp_in = norm_layer(dim)
111
+ self.mlp_in = Mlp(
112
+ in_features=dim,
113
+ hidden_features=int(dim * 4),
114
+ out_features=dim,
115
+ act_layer=act_layer,
116
+ drop=proj_drop,
117
+ )
118
+
119
+ self.norm1_proj = norm_layer(dim)
120
+ self.proj = nn.Linear(dim * num_pixel, dim_out, bias=True)
121
+
122
+ # Outer transformer
123
+ self.norm_out = norm_layer(dim_out)
124
+ self.attn_out = Attention(
125
+ dim_out,
126
+ dim_out,
127
+ num_heads=num_heads_out,
128
+ qkv_bias=qkv_bias,
129
+ attn_drop=attn_drop,
130
+ proj_drop=proj_drop,
131
+ )
132
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
133
+
134
+ self.norm_mlp = norm_layer(dim_out)
135
+ self.mlp = Mlp(
136
+ in_features=dim_out,
137
+ hidden_features=int(dim_out * mlp_ratio),
138
+ out_features=dim_out,
139
+ act_layer=act_layer,
140
+ drop=proj_drop,
141
+ )
142
+
143
+ def forward(self, pixel_embed, patch_embed):
144
+ # inner
145
+ pixel_embed = pixel_embed + self.drop_path(self.attn_in(self.norm_in(pixel_embed)))
146
+ pixel_embed = pixel_embed + self.drop_path(self.mlp_in(self.norm_mlp_in(pixel_embed)))
147
+ # outer
148
+ B, N, C = patch_embed.size()
149
+ patch_embed = torch.cat(
150
+ [patch_embed[:, 0:1], patch_embed[:, 1:] + self.proj(self.norm1_proj(pixel_embed).reshape(B, N - 1, -1))],
151
+ dim=1)
152
+ patch_embed = patch_embed + self.drop_path(self.attn_out(self.norm_out(patch_embed)))
153
+ patch_embed = patch_embed + self.drop_path(self.mlp(self.norm_mlp(patch_embed)))
154
+ return pixel_embed, patch_embed
155
+
156
+
157
+ class PixelEmbed(nn.Module):
158
+ """ Image to Pixel Embedding
159
+ """
160
+ def __init__(self, img_size=224, patch_size=16, in_chans=3, in_dim=48, stride=4):
161
+ super().__init__()
162
+ img_size = to_2tuple(img_size)
163
+ patch_size = to_2tuple(patch_size)
164
+ # grid_size property necessary for resizing positional embedding
165
+ self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
166
+ num_patches = (self.grid_size[0]) * (self.grid_size[1])
167
+ self.img_size = img_size
168
+ self.num_patches = num_patches
169
+ self.in_dim = in_dim
170
+ new_patch_size = [math.ceil(ps / stride) for ps in patch_size]
171
+ self.new_patch_size = new_patch_size
172
+
173
+ self.proj = nn.Conv2d(in_chans, self.in_dim, kernel_size=7, padding=3, stride=stride)
174
+ self.unfold = nn.Unfold(kernel_size=new_patch_size, stride=new_patch_size)
175
+
176
+ def forward(self, x, pixel_pos):
177
+ B, C, H, W = x.shape
178
+ _assert(H == self.img_size[0],
179
+ f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).")
180
+ _assert(W == self.img_size[1],
181
+ f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).")
182
+ x = self.proj(x)
183
+ x = self.unfold(x)
184
+ x = x.transpose(1, 2).reshape(B * self.num_patches, self.in_dim, self.new_patch_size[0], self.new_patch_size[1])
185
+ x = x + pixel_pos
186
+ x = x.reshape(B * self.num_patches, self.in_dim, -1).transpose(1, 2)
187
+ return x
188
+
189
+
190
+ class TNT(nn.Module):
191
+ """ Transformer in Transformer - https://arxiv.org/abs/2103.00112
192
+ """
193
+ def __init__(
194
+ self,
195
+ img_size=224,
196
+ patch_size=16,
197
+ in_chans=3,
198
+ num_classes=1000,
199
+ global_pool='token',
200
+ embed_dim=768,
201
+ inner_dim=48,
202
+ depth=12,
203
+ num_heads_inner=4,
204
+ num_heads_outer=12,
205
+ mlp_ratio=4.,
206
+ qkv_bias=False,
207
+ drop_rate=0.,
208
+ pos_drop_rate=0.,
209
+ proj_drop_rate=0.,
210
+ attn_drop_rate=0.,
211
+ drop_path_rate=0.,
212
+ norm_layer=nn.LayerNorm,
213
+ first_stride=4,
214
+ ):
215
+ super().__init__()
216
+ assert global_pool in ('', 'token', 'avg')
217
+ self.num_classes = num_classes
218
+ self.global_pool = global_pool
219
+ self.num_features = self.head_hidden_size = self.embed_dim = embed_dim # for consistency with other models
220
+ self.grad_checkpointing = False
221
+
222
+ self.pixel_embed = PixelEmbed(
223
+ img_size=img_size,
224
+ patch_size=patch_size,
225
+ in_chans=in_chans,
226
+ in_dim=inner_dim,
227
+ stride=first_stride,
228
+ )
229
+ num_patches = self.pixel_embed.num_patches
230
+ self.num_patches = num_patches
231
+ new_patch_size = self.pixel_embed.new_patch_size
232
+ num_pixel = new_patch_size[0] * new_patch_size[1]
233
+
234
+ self.norm1_proj = norm_layer(num_pixel * inner_dim)
235
+ self.proj = nn.Linear(num_pixel * inner_dim, embed_dim)
236
+ self.norm2_proj = norm_layer(embed_dim)
237
+
238
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
239
+ self.patch_pos = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
240
+ self.pixel_pos = nn.Parameter(torch.zeros(1, inner_dim, new_patch_size[0], new_patch_size[1]))
241
+ self.pos_drop = nn.Dropout(p=pos_drop_rate)
242
+
243
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
244
+ blocks = []
245
+ for i in range(depth):
246
+ blocks.append(Block(
247
+ dim=inner_dim,
248
+ dim_out=embed_dim,
249
+ num_pixel=num_pixel,
250
+ num_heads_in=num_heads_inner,
251
+ num_heads_out=num_heads_outer,
252
+ mlp_ratio=mlp_ratio,
253
+ qkv_bias=qkv_bias,
254
+ proj_drop=proj_drop_rate,
255
+ attn_drop=attn_drop_rate,
256
+ drop_path=dpr[i],
257
+ norm_layer=norm_layer,
258
+ ))
259
+ self.blocks = nn.ModuleList(blocks)
260
+ self.norm = norm_layer(embed_dim)
261
+
262
+ self.head_drop = nn.Dropout(drop_rate)
263
+ self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
264
+
265
+ trunc_normal_(self.cls_token, std=.02)
266
+ trunc_normal_(self.patch_pos, std=.02)
267
+ trunc_normal_(self.pixel_pos, std=.02)
268
+ self.apply(self._init_weights)
269
+
270
+ def _init_weights(self, m):
271
+ if isinstance(m, nn.Linear):
272
+ trunc_normal_(m.weight, std=.02)
273
+ if isinstance(m, nn.Linear) and m.bias is not None:
274
+ nn.init.constant_(m.bias, 0)
275
+ elif isinstance(m, nn.LayerNorm):
276
+ nn.init.constant_(m.bias, 0)
277
+ nn.init.constant_(m.weight, 1.0)
278
+
279
+ @torch.jit.ignore
280
+ def no_weight_decay(self):
281
+ return {'patch_pos', 'pixel_pos', 'cls_token'}
282
+
283
+ @torch.jit.ignore
284
+ def group_matcher(self, coarse=False):
285
+ matcher = dict(
286
+ stem=r'^cls_token|patch_pos|pixel_pos|pixel_embed|norm[12]_proj|proj', # stem and embed / pos
287
+ blocks=[
288
+ (r'^blocks\.(\d+)', None),
289
+ (r'^norm', (99999,)),
290
+ ]
291
+ )
292
+ return matcher
293
+
294
+ @torch.jit.ignore
295
+ def set_grad_checkpointing(self, enable=True):
296
+ self.grad_checkpointing = enable
297
+
298
+ @torch.jit.ignore
299
+ def get_classifier(self) -> nn.Module:
300
+ return self.head
301
+
302
+ def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
303
+ self.num_classes = num_classes
304
+ if global_pool is not None:
305
+ assert global_pool in ('', 'token', 'avg')
306
+ self.global_pool = global_pool
307
+ self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
308
+
309
+ def forward_features(self, x):
310
+ B = x.shape[0]
311
+ pixel_embed = self.pixel_embed(x, self.pixel_pos)
312
+
313
+ patch_embed = self.norm2_proj(self.proj(self.norm1_proj(pixel_embed.reshape(B, self.num_patches, -1))))
314
+ patch_embed = torch.cat((self.cls_token.expand(B, -1, -1), patch_embed), dim=1)
315
+ patch_embed = patch_embed + self.patch_pos
316
+ patch_embed = self.pos_drop(patch_embed)
317
+
318
+ if self.grad_checkpointing and not torch.jit.is_scripting():
319
+ for blk in self.blocks:
320
+ pixel_embed, patch_embed = checkpoint(blk, pixel_embed, patch_embed)
321
+ else:
322
+ for blk in self.blocks:
323
+ pixel_embed, patch_embed = blk(pixel_embed, patch_embed)
324
+
325
+ patch_embed = self.norm(patch_embed)
326
+ return patch_embed
327
+
328
+ def forward_head(self, x, pre_logits: bool = False):
329
+ if self.global_pool:
330
+ x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
331
+ x = self.head_drop(x)
332
+ return x if pre_logits else self.head(x)
333
+
334
+ def forward(self, x):
335
+ x = self.forward_features(x)
336
+ x = self.forward_head(x)
337
+ return x
338
+
339
+
340
+ def checkpoint_filter_fn(state_dict, model):
341
+ """ convert patch embedding weight from manual patchify + linear proj to conv"""
342
+ if state_dict['patch_pos'].shape != model.patch_pos.shape:
343
+ state_dict['patch_pos'] = resize_pos_embed(state_dict['patch_pos'],
344
+ model.patch_pos, getattr(model, 'num_tokens', 1), model.pixel_embed.grid_size)
345
+ return state_dict
346
+
347
+
348
+ def _create_tnt(variant, pretrained=False, **kwargs):
349
+ if kwargs.get('features_only', None):
350
+ raise RuntimeError('features_only not implemented for TNT models.')
351
+
352
+ model = build_model_with_cfg(
353
+ TNT, variant, pretrained,
354
+ pretrained_filter_fn=checkpoint_filter_fn,
355
+ **kwargs)
356
+ return model
357
+
358
+
359
+ @register_model
360
+ def tnt_s_patch16_224(pretrained=False, **kwargs) -> TNT:
361
+ model_cfg = dict(
362
+ patch_size=16, embed_dim=384, inner_dim=24, depth=12, num_heads_outer=6,
363
+ qkv_bias=False)
364
+ model = _create_tnt('tnt_s_patch16_224', pretrained=pretrained, **dict(model_cfg, **kwargs))
365
+ return model
366
+
367
+
368
+ @register_model
369
+ def tnt_b_patch16_224(pretrained=False, **kwargs) -> TNT:
370
+ model_cfg = dict(
371
+ patch_size=16, embed_dim=640, inner_dim=40, depth=12, num_heads_outer=10,
372
+ qkv_bias=False)
373
+ model = _create_tnt('tnt_b_patch16_224', pretrained=pretrained, **dict(model_cfg, **kwargs))
374
+ return model
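
With both TNT entrypoints registered, here is a hedged usage sketch (not part of the diff; it assumes timm is importable, and the shapes follow from `embed_dim=384` in the small variant):

import torch
import timm

model = timm.create_model('tnt_s_patch16_224', pretrained=False).eval()
x = torch.randn(2, 3, 224, 224)  # fixed_input_size=True, so inputs must be 224x224
with torch.no_grad():
    logits = model(x)
print(logits.shape)  # expected: torch.Size([2, 1000])

# pre_logits returns the pooled patch embedding instead of the classifier output
with torch.no_grad():
    emb = model.forward_head(model.forward_features(x), pre_logits=True)
print(emb.shape)  # expected: torch.Size([2, 384])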
pytorch-image-models/timm/models/tresnet.py ADDED
@@ -0,0 +1,346 @@
1
+ """
2
+ TResNet: High Performance GPU-Dedicated Architecture
3
+ https://arxiv.org/pdf/2003.13630.pdf
4
+
5
+ Original model: https://github.com/mrT23/TResNet
6
+
7
+ """
8
+ from collections import OrderedDict
9
+ from functools import partial
10
+ from typing import Optional
11
+
12
+ import torch
13
+ import torch.nn as nn
14
+
15
+ from timm.layers import SpaceToDepth, BlurPool2d, ClassifierHead, SEModule, ConvNormAct, DropPath
16
+ from ._builder import build_model_with_cfg
17
+ from ._manipulate import checkpoint_seq
18
+ from ._registry import register_model, generate_default_cfgs, register_model_deprecations
19
+
20
+ __all__ = ['TResNet'] # model_registry will add each entrypoint fn to this
21
+
22
+
23
+ class BasicBlock(nn.Module):
24
+ expansion = 1
25
+
26
+ def __init__(
27
+ self,
28
+ inplanes,
29
+ planes,
30
+ stride=1,
31
+ downsample=None,
32
+ use_se=True,
33
+ aa_layer=None,
34
+ drop_path_rate=0.
35
+ ):
36
+ super(BasicBlock, self).__init__()
37
+ self.downsample = downsample
38
+ self.stride = stride
39
+ act_layer = partial(nn.LeakyReLU, negative_slope=1e-3)
40
+
41
+ self.conv1 = ConvNormAct(inplanes, planes, kernel_size=3, stride=stride, act_layer=act_layer, aa_layer=aa_layer)
42
+ self.conv2 = ConvNormAct(planes, planes, kernel_size=3, stride=1, apply_act=False)
43
+ self.act = nn.ReLU(inplace=True)
44
+
45
+ rd_chs = max(planes * self.expansion // 4, 64)
46
+ self.se = SEModule(planes * self.expansion, rd_channels=rd_chs) if use_se else None
47
+ self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
48
+
49
+ def forward(self, x):
50
+ if self.downsample is not None:
51
+ shortcut = self.downsample(x)
52
+ else:
53
+ shortcut = x
54
+ out = self.conv1(x)
55
+ out = self.conv2(out)
56
+ if self.se is not None:
57
+ out = self.se(out)
58
+ out = self.drop_path(out) + shortcut
59
+ out = self.act(out)
60
+ return out
61
+
62
+
63
+ class Bottleneck(nn.Module):
64
+ expansion = 4
65
+
66
+ def __init__(
67
+ self,
68
+ inplanes,
69
+ planes,
70
+ stride=1,
71
+ downsample=None,
72
+ use_se=True,
73
+ act_layer=None,
74
+ aa_layer=None,
75
+ drop_path_rate=0.,
76
+ ):
77
+ super(Bottleneck, self).__init__()
78
+ self.downsample = downsample
79
+ self.stride = stride
80
+ act_layer = act_layer or partial(nn.LeakyReLU, negative_slope=1e-3)
81
+
82
+ self.conv1 = ConvNormAct(
83
+ inplanes, planes, kernel_size=1, stride=1, act_layer=act_layer)
84
+ self.conv2 = ConvNormAct(
85
+ planes, planes, kernel_size=3, stride=stride, act_layer=act_layer, aa_layer=aa_layer)
86
+
87
+ reduction_chs = max(planes * self.expansion // 8, 64)
88
+ self.se = SEModule(planes, rd_channels=reduction_chs) if use_se else None
89
+
90
+ self.conv3 = ConvNormAct(
91
+ planes, planes * self.expansion, kernel_size=1, stride=1, apply_act=False)
92
+
93
+ self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
94
+ self.act = nn.ReLU(inplace=True)
95
+
96
+ def forward(self, x):
97
+ if self.downsample is not None:
98
+ shortcut = self.downsample(x)
99
+ else:
100
+ shortcut = x
101
+ out = self.conv1(x)
102
+ out = self.conv2(out)
103
+ if self.se is not None:
104
+ out = self.se(out)
105
+ out = self.conv3(out)
106
+ out = self.drop_path(out) + shortcut
107
+ out = self.act(out)
108
+ return out
109
+
110
+
111
+ class TResNet(nn.Module):
112
+ def __init__(
113
+ self,
114
+ layers,
115
+ in_chans=3,
116
+ num_classes=1000,
117
+ width_factor=1.0,
118
+ v2=False,
119
+ global_pool='fast',
120
+ drop_rate=0.,
121
+ drop_path_rate=0.,
122
+ ):
123
+ self.num_classes = num_classes
124
+ self.drop_rate = drop_rate
125
+ self.grad_checkpointing = False
126
+ super(TResNet, self).__init__()
127
+
128
+ aa_layer = BlurPool2d
129
+ act_layer = nn.LeakyReLU
130
+
131
+ # TResnet stages
132
+ self.inplanes = int(64 * width_factor)
133
+ self.planes = int(64 * width_factor)
134
+ if v2:
135
+ self.inplanes = self.inplanes // 8 * 8
136
+ self.planes = self.planes // 8 * 8
137
+
138
+ dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)]
139
+ conv1 = ConvNormAct(in_chans * 16, self.planes, stride=1, kernel_size=3, act_layer=act_layer)
140
+ layer1 = self._make_layer(
141
+ Bottleneck if v2 else BasicBlock,
142
+ self.planes, layers[0], stride=1, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[0])
143
+ layer2 = self._make_layer(
144
+ Bottleneck if v2 else BasicBlock,
145
+ self.planes * 2, layers[1], stride=2, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[1])
146
+ layer3 = self._make_layer(
147
+ Bottleneck,
148
+ self.planes * 4, layers[2], stride=2, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[2])
149
+ layer4 = self._make_layer(
150
+ Bottleneck,
151
+ self.planes * 8, layers[3], stride=2, use_se=False, aa_layer=aa_layer, drop_path_rate=dpr[3])
152
+
153
+ # body
154
+ self.body = nn.Sequential(OrderedDict([
155
+ ('s2d', SpaceToDepth()),
156
+ ('conv1', conv1),
157
+ ('layer1', layer1),
158
+ ('layer2', layer2),
159
+ ('layer3', layer3),
160
+ ('layer4', layer4),
161
+ ]))
162
+
163
+ self.feature_info = [
164
+ dict(num_chs=self.planes, reduction=2, module=''), # Not with S2D?
165
+ dict(num_chs=self.planes * (Bottleneck.expansion if v2 else 1), reduction=4, module='body.layer1'),
166
+ dict(num_chs=self.planes * 2 * (Bottleneck.expansion if v2 else 1), reduction=8, module='body.layer2'),
167
+ dict(num_chs=self.planes * 4 * Bottleneck.expansion, reduction=16, module='body.layer3'),
168
+ dict(num_chs=self.planes * 8 * Bottleneck.expansion, reduction=32, module='body.layer4'),
169
+ ]
170
+
171
+ # head
172
+ self.num_features = self.head_hidden_size = (self.planes * 8) * Bottleneck.expansion
173
+ self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)
174
+
175
+ # model initialization
176
+ for m in self.modules():
177
+ if isinstance(m, nn.Conv2d):
178
+ nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')
179
+ if isinstance(m, nn.Linear):
180
+ m.weight.data.normal_(0, 0.01)
181
+
182
+ # residual connections special initialization
183
+ for m in self.modules():
184
+ if isinstance(m, BasicBlock):
185
+ nn.init.zeros_(m.conv2.bn.weight)
186
+ if isinstance(m, Bottleneck):
187
+ nn.init.zeros_(m.conv3.bn.weight)
188
+
189
+ def _make_layer(self, block, planes, blocks, stride=1, use_se=True, aa_layer=None, drop_path_rate=0.):
190
+ downsample = None
191
+ if stride != 1 or self.inplanes != planes * block.expansion:
192
+ layers = []
193
+ if stride == 2:
194
+ # avg pooling before 1x1 conv
195
+ layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False))
196
+ layers += [ConvNormAct(
197
+ self.inplanes, planes * block.expansion, kernel_size=1, stride=1, apply_act=False)]
198
+ downsample = nn.Sequential(*layers)
199
+
200
+ layers = []
201
+ for i in range(blocks):
202
+ layers.append(block(
203
+ self.inplanes,
204
+ planes,
205
+ stride=stride if i == 0 else 1,
206
+ downsample=downsample if i == 0 else None,
207
+ use_se=use_se,
208
+ aa_layer=aa_layer,
209
+ drop_path_rate=drop_path_rate[i] if isinstance(drop_path_rate, list) else drop_path_rate,
210
+ ))
211
+ self.inplanes = planes * block.expansion
212
+ return nn.Sequential(*layers)
213
+
214
+ @torch.jit.ignore
215
+ def group_matcher(self, coarse=False):
216
+ matcher = dict(stem=r'^body\.conv1', blocks=r'^body\.layer(\d+)' if coarse else r'^body\.layer(\d+)\.(\d+)')
217
+ return matcher
218
+
219
+ @torch.jit.ignore
220
+ def set_grad_checkpointing(self, enable=True):
221
+ self.grad_checkpointing = enable
222
+
223
+ @torch.jit.ignore
224
+ def get_classifier(self) -> nn.Module:
225
+ return self.head.fc
226
+
227
+ def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
228
+ self.head.reset(num_classes, pool_type=global_pool)
229
+
230
+ def forward_features(self, x):
231
+ if self.grad_checkpointing and not torch.jit.is_scripting():
232
+ x = self.body.s2d(x)
233
+ x = self.body.conv1(x)
234
+ x = checkpoint_seq([
235
+ self.body.layer1,
236
+ self.body.layer2,
237
+ self.body.layer3,
238
+ self.body.layer4],
239
+ x, flatten=True)
240
+ else:
241
+ x = self.body(x)
242
+ return x
243
+
244
+ def forward_head(self, x, pre_logits: bool = False):
245
+ return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
246
+
247
+ def forward(self, x):
248
+ x = self.forward_features(x)
249
+ x = self.forward_head(x)
250
+ return x
251
+
252
+
253
+ def checkpoint_filter_fn(state_dict, model):
254
+ if 'body.conv1.conv.weight' in state_dict:
255
+ return state_dict
256
+
257
+ import re
258
+ state_dict = state_dict.get('model', state_dict)
259
+ state_dict = state_dict.get('state_dict', state_dict)
260
+ out_dict = {}
261
+ for k, v in state_dict.items():
262
+ k = re.sub(r'conv(\d+)\.0.0', lambda x: f'conv{int(x.group(1))}.conv', k)
263
+ k = re.sub(r'conv(\d+)\.0.1', lambda x: f'conv{int(x.group(1))}.bn', k)
264
+ k = re.sub(r'conv(\d+)\.0', lambda x: f'conv{int(x.group(1))}.conv', k)
265
+ k = re.sub(r'conv(\d+)\.1', lambda x: f'conv{int(x.group(1))}.bn', k)
266
+ k = re.sub(r'downsample\.(\d+)\.0', lambda x: f'downsample.{int(x.group(1))}.conv', k)
267
+ k = re.sub(r'downsample\.(\d+)\.1', lambda x: f'downsample.{int(x.group(1))}.bn', k)
268
+ if k.endswith('bn.weight'):
269
+ # convert weight from inplace_abn to batchnorm
270
+ v = v.abs().add(1e-5)
271
+ out_dict[k] = v
272
+ return out_dict
273
+
274
+
275
+ def _create_tresnet(variant, pretrained=False, **kwargs):
276
+ return build_model_with_cfg(
277
+ TResNet,
278
+ variant,
279
+ pretrained,
280
+ pretrained_filter_fn=checkpoint_filter_fn,
281
+ feature_cfg=dict(out_indices=(1, 2, 3, 4), flatten_sequential=True),
282
+ **kwargs,
283
+ )
284
+
285
+
286
+ def _cfg(url='', **kwargs):
287
+ return {
288
+ 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
289
+ 'crop_pct': 0.875, 'interpolation': 'bilinear',
290
+ 'mean': (0., 0., 0.), 'std': (1., 1., 1.),
291
+ 'first_conv': 'body.conv1.conv', 'classifier': 'head.fc',
292
+ **kwargs
293
+ }
294
+
295
+
296
+ default_cfgs = generate_default_cfgs({
297
+ 'tresnet_m.miil_in21k_ft_in1k': _cfg(hf_hub_id='timm/'),
298
+ 'tresnet_m.miil_in21k': _cfg(hf_hub_id='timm/', num_classes=11221),
299
+ 'tresnet_m.miil_in1k': _cfg(hf_hub_id='timm/'),
300
+ 'tresnet_l.miil_in1k': _cfg(hf_hub_id='timm/'),
301
+ 'tresnet_xl.miil_in1k': _cfg(hf_hub_id='timm/'),
302
+ 'tresnet_m.miil_in1k_448': _cfg(
303
+ input_size=(3, 448, 448), pool_size=(14, 14),
304
+ hf_hub_id='timm/'),
305
+ 'tresnet_l.miil_in1k_448': _cfg(
306
+ input_size=(3, 448, 448), pool_size=(14, 14),
307
+ hf_hub_id='timm/'),
308
+ 'tresnet_xl.miil_in1k_448': _cfg(
309
+ input_size=(3, 448, 448), pool_size=(14, 14),
310
+ hf_hub_id='timm/'),
311
+
312
+ 'tresnet_v2_l.miil_in21k_ft_in1k': _cfg(hf_hub_id='timm/'),
313
+ 'tresnet_v2_l.miil_in21k': _cfg(hf_hub_id='timm/', num_classes=11221),
314
+ })
315
+
316
+
317
+ @register_model
318
+ def tresnet_m(pretrained=False, **kwargs) -> TResNet:
319
+ model_args = dict(layers=[3, 4, 11, 3])
320
+ return _create_tresnet('tresnet_m', pretrained=pretrained, **dict(model_args, **kwargs))
321
+
322
+
323
+ @register_model
324
+ def tresnet_l(pretrained=False, **kwargs) -> TResNet:
325
+ model_args = dict(layers=[4, 5, 18, 3], width_factor=1.2)
326
+ return _create_tresnet('tresnet_l', pretrained=pretrained, **dict(model_args, **kwargs))
327
+
328
+
329
+ @register_model
330
+ def tresnet_xl(pretrained=False, **kwargs) -> TResNet:
331
+ model_args = dict(layers=[4, 5, 24, 3], width_factor=1.3)
332
+ return _create_tresnet('tresnet_xl', pretrained=pretrained, **dict(model_args, **kwargs))
333
+
334
+
335
+ @register_model
336
+ def tresnet_v2_l(pretrained=False, **kwargs) -> TResNet:
337
+ model_args = dict(layers=[3, 4, 23, 3], width_factor=1.0, v2=True)
338
+ return _create_tresnet('tresnet_v2_l', pretrained=pretrained, **dict(model_args, **kwargs))
339
+
340
+
341
+ register_model_deprecations(__name__, {
342
+ 'tresnet_m_miil_in21k': 'tresnet_m.miil_in21k',
343
+ 'tresnet_m_448': 'tresnet_m.miil_in1k_448',
344
+ 'tresnet_l_448': 'tresnet_l.miil_in1k_448',
345
+ 'tresnet_xl_448': 'tresnet_xl.miil_in1k_448',
346
+ })
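
To close out the TResNet additions, a minimal sketch (assumption: timm is importable; no pretrained download) exercising the pooled-feature path via `num_classes=0`:

import torch
import timm

# num_classes=0 keeps the ClassifierHead pooling but replaces the fc with Identity
model = timm.create_model('tresnet_m', pretrained=False, num_classes=0).eval()
with torch.no_grad():
    pooled = model(torch.randn(1, 3, 224, 224))
print(pooled.shape)  # expected: torch.Size([1, 2048]) = planes * 8 * Bottleneck.expansion

# checkpoint_filter_fn above remaps original inplace_abn checkpoints; it is a no-op for timm-format weights
sd = model.state_dict()
assert 'body.conv1.conv.weight' in sd  # already in timm format, so the filter returns it unchanged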
pytorch-image-models/timm/models/twins.py ADDED
@@ -0,0 +1,581 @@
1
+ """ Twins
2
+ A PyTorch impl of : `Twins: Revisiting the Design of Spatial Attention in Vision Transformers`
3
+ - https://arxiv.org/pdf/2104.13840.pdf
4
+
5
+ Code/weights from https://github.com/Meituan-AutoML/Twins, original copyright/license info below
6
+
7
+ """
8
+ # --------------------------------------------------------
9
+ # Twins
10
+ # Copyright (c) 2021 Meituan
11
+ # Licensed under The Apache 2.0 License [see LICENSE for details]
12
+ # Written by Xinjie Li, Xiangxiang Chu
13
+ # --------------------------------------------------------
14
+ import math
15
+ from functools import partial
16
+ from typing import List, Optional, Tuple, Union
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+ import torch.nn.functional as F
21
+
22
+ from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
23
+ from timm.layers import Mlp, DropPath, to_2tuple, trunc_normal_, use_fused_attn
24
+ from ._builder import build_model_with_cfg
25
+ from ._features import feature_take_indices
26
+ from ._features_fx import register_notrace_module
27
+ from ._registry import register_model, generate_default_cfgs
28
+ from .vision_transformer import Attention
29
+
30
+ __all__ = ['Twins'] # model_registry will add each entrypoint fn to this
31
+
32
+ Size_ = Tuple[int, int]
33
+
34
+
35
+ @register_notrace_module # reason: FX can't symbolically trace control flow in forward method
36
+ class LocallyGroupedAttn(nn.Module):
37
+ """ LSA: self attention within a group
38
+ """
39
+ fused_attn: torch.jit.Final[bool]
40
+
41
+ def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., ws=1):
42
+ assert ws != 1
43
+ super(LocallyGroupedAttn, self).__init__()
44
+ assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
45
+
46
+ self.dim = dim
47
+ self.num_heads = num_heads
48
+ head_dim = dim // num_heads
49
+ self.scale = head_dim ** -0.5
50
+ self.fused_attn = use_fused_attn()
51
+
52
+ self.qkv = nn.Linear(dim, dim * 3, bias=True)
53
+ self.attn_drop = nn.Dropout(attn_drop)
54
+ self.proj = nn.Linear(dim, dim)
55
+ self.proj_drop = nn.Dropout(proj_drop)
56
+ self.ws = ws
57
+
58
+ def forward(self, x, size: Size_):
59
+ # There are two implementations of this function: zero padding or masking. We observe no obvious
60
+ # difference between them. Either can be used; the padding version below is kept because it is
61
+ # simpler, though the masking implementation (commented out below) is arguably more precise.
62
+ B, N, C = x.shape
63
+ H, W = size
64
+ x = x.view(B, H, W, C)
65
+ pad_l = pad_t = 0
66
+ pad_r = (self.ws - W % self.ws) % self.ws
67
+ pad_b = (self.ws - H % self.ws) % self.ws
68
+ x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
69
+ _, Hp, Wp, _ = x.shape
70
+ _h, _w = Hp // self.ws, Wp // self.ws
71
+ x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3)
72
+ qkv = self.qkv(x).reshape(
73
+ B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5)
74
+ q, k, v = qkv.unbind(0)
75
+
76
+ if self.fused_attn:
77
+ x = F.scaled_dot_product_attention(
78
+ q, k, v,
79
+ dropout_p=self.attn_drop.p if self.training else 0.,
80
+ )
81
+ else:
82
+ q = q * self.scale
83
+ attn = q @ k.transpose(-2, -1)
84
+ attn = attn.softmax(dim=-1)
85
+ attn = self.attn_drop(attn)
86
+ x = attn @ v
87
+
88
+ x = x.transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C)
89
+ x = x.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C)
90
+ if pad_r > 0 or pad_b > 0:
91
+ x = x[:, :H, :W, :].contiguous()
92
+ x = x.reshape(B, N, C)
93
+ x = self.proj(x)
94
+ x = self.proj_drop(x)
95
+ return x
96
+
97
+ # def forward_mask(self, x, size: Size_):
98
+ # B, N, C = x.shape
99
+ # H, W = size
100
+ # x = x.view(B, H, W, C)
101
+ # pad_l = pad_t = 0
102
+ # pad_r = (self.ws - W % self.ws) % self.ws
103
+ # pad_b = (self.ws - H % self.ws) % self.ws
104
+ # x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
105
+ # _, Hp, Wp, _ = x.shape
106
+ # _h, _w = Hp // self.ws, Wp // self.ws
107
+ # mask = torch.zeros((1, Hp, Wp), device=x.device)
108
+ # mask[:, -pad_b:, :].fill_(1)
109
+ # mask[:, :, -pad_r:].fill_(1)
110
+ #
111
+ # x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3) # B, _h, _w, ws, ws, C
112
+ # mask = mask.reshape(1, _h, self.ws, _w, self.ws).transpose(2, 3).reshape(1, _h * _w, self.ws * self.ws)
113
+ # attn_mask = mask.unsqueeze(2) - mask.unsqueeze(3) # 1, _h*_w, ws*ws, ws*ws
114
+ # attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-1000.0)).masked_fill(attn_mask == 0, float(0.0))
115
+ # qkv = self.qkv(x).reshape(
116
+ # B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5)
117
+ # # n_h, B, _w*_h, nhead, ws*ws, dim
118
+ # q, k, v = qkv[0], qkv[1], qkv[2] # B, _h*_w, n_head, ws*ws, dim_head
119
+ # attn = (q @ k.transpose(-2, -1)) * self.scale # B, _h*_w, n_head, ws*ws, ws*ws
120
+ # attn = attn + attn_mask.unsqueeze(2)
121
+ # attn = attn.softmax(dim=-1)
122
+ # attn = self.attn_drop(attn) # attn @v -> B, _h*_w, n_head, ws*ws, dim_head
123
+ # attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C)
124
+ # x = attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C)
125
+ # if pad_r > 0 or pad_b > 0:
126
+ # x = x[:, :H, :W, :].contiguous()
127
+ # x = x.reshape(B, N, C)
128
+ # x = self.proj(x)
129
+ # x = self.proj_drop(x)
130
+ # return x
131
+
132
+
133
+ class GlobalSubSampleAttn(nn.Module):
134
+ """ GSA: using a key to summarize the information for a group to be efficient.
135
+ """
136
+ fused_attn: torch.jit.Final[bool]
137
+
138
+ def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., sr_ratio=1):
139
+ super().__init__()
140
+ assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
141
+
142
+ self.dim = dim
143
+ self.num_heads = num_heads
144
+ head_dim = dim // num_heads
145
+ self.scale = head_dim ** -0.5
146
+ self.fused_attn = use_fused_attn()
147
+
148
+ self.q = nn.Linear(dim, dim, bias=True)
149
+ self.kv = nn.Linear(dim, dim * 2, bias=True)
150
+ self.attn_drop = nn.Dropout(attn_drop)
151
+ self.proj = nn.Linear(dim, dim)
152
+ self.proj_drop = nn.Dropout(proj_drop)
153
+
154
+ self.sr_ratio = sr_ratio
155
+ if sr_ratio > 1:
156
+ self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
157
+ self.norm = nn.LayerNorm(dim)
158
+ else:
159
+ self.sr = None
160
+ self.norm = None
161
+
162
+ def forward(self, x, size: Size_):
163
+ B, N, C = x.shape
164
+ q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
165
+
166
+ if self.sr is not None:
167
+ x = x.permute(0, 2, 1).reshape(B, C, *size)
168
+ x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1)
169
+ x = self.norm(x)
170
+ kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
171
+ k, v = kv.unbind(0)
172
+
173
+ if self.fused_attn:
174
+ x = torch.nn.functional.scaled_dot_product_attention(
175
+ q, k, v,
176
+ dropout_p=self.attn_drop.p if self.training else 0.,
177
+ )
178
+ else:
179
+ q = q * self.scale
180
+ attn = q @ k.transpose(-2, -1)
181
+ attn = attn.softmax(dim=-1)
182
+ attn = self.attn_drop(attn)
183
+ x = attn @ v
184
+
185
+ x = x.transpose(1, 2).reshape(B, N, C)
186
+ x = self.proj(x)
187
+ x = self.proj_drop(x)
188
+
189
+ return x
190
+
191
+
192
+ class Block(nn.Module):
193
+
194
+ def __init__(
195
+ self,
196
+ dim,
197
+ num_heads,
198
+ mlp_ratio=4.,
199
+ proj_drop=0.,
200
+ attn_drop=0.,
201
+ drop_path=0.,
202
+ act_layer=nn.GELU,
203
+ norm_layer=nn.LayerNorm,
204
+ sr_ratio=1,
205
+ ws=None,
206
+ ):
207
+ super().__init__()
208
+ self.norm1 = norm_layer(dim)
209
+ if ws is None:
210
+ self.attn = Attention(dim, num_heads, False, None, attn_drop, proj_drop)
211
+ elif ws == 1:
212
+ self.attn = GlobalSubSampleAttn(dim, num_heads, attn_drop, proj_drop, sr_ratio)
213
+ else:
214
+ self.attn = LocallyGroupedAttn(dim, num_heads, attn_drop, proj_drop, ws)
215
+ self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
216
+
217
+ self.norm2 = norm_layer(dim)
218
+ self.mlp = Mlp(
219
+ in_features=dim,
220
+ hidden_features=int(dim * mlp_ratio),
221
+ act_layer=act_layer,
222
+ drop=proj_drop,
223
+ )
224
+ self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
225
+
226
+ def forward(self, x, size: Size_):
227
+ x = x + self.drop_path1(self.attn(self.norm1(x), size))
228
+ x = x + self.drop_path2(self.mlp(self.norm2(x)))
229
+ return x
230
+
231
+
232
+ class PosConv(nn.Module):
233
+ # PEG from https://arxiv.org/abs/2102.10882
234
+ def __init__(self, in_chans, embed_dim=768, stride=1):
235
+ super(PosConv, self).__init__()
236
+ self.proj = nn.Sequential(
237
+ nn.Conv2d(in_chans, embed_dim, 3, stride, 1, bias=True, groups=embed_dim),
238
+ )
239
+ self.stride = stride
240
+
241
+ def forward(self, x, size: Size_):
242
+ B, N, C = x.shape
243
+ cnn_feat_token = x.transpose(1, 2).view(B, C, *size)
244
+ x = self.proj(cnn_feat_token)
245
+ if self.stride == 1:
246
+ x += cnn_feat_token
247
+ x = x.flatten(2).transpose(1, 2)
248
+ return x
249
+
250
+ def no_weight_decay(self):
251
+ return ['proj.%d.weight' % i for i in range(4)]
252
+
253
+
254
+ class PatchEmbed(nn.Module):
255
+ """ Image to Patch Embedding
256
+ """
257
+
258
+ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
259
+ super().__init__()
260
+ img_size = to_2tuple(img_size)
261
+ patch_size = to_2tuple(patch_size)
262
+
263
+ self.img_size = img_size
264
+ self.patch_size = patch_size
265
+ assert img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0, \
266
+ f"img_size {img_size} should be divided by patch_size {patch_size}."
267
+ self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
268
+ self.num_patches = self.H * self.W
269
+ self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
270
+ self.norm = nn.LayerNorm(embed_dim)
271
+
272
+ def forward(self, x) -> Tuple[torch.Tensor, Size_]:
273
+ B, C, H, W = x.shape
274
+
275
+ x = self.proj(x).flatten(2).transpose(1, 2)
276
+ x = self.norm(x)
277
+ out_size = (H // self.patch_size[0], W // self.patch_size[1])
278
+
279
+ return x, out_size
280
+
281
+
282
+ class Twins(nn.Module):
283
+ """ Twins Vision Transfomer (Revisiting Spatial Attention)
284
+
285
+ Adapted from PVT (PyramidVisionTransformer) class at https://github.com/whai362/PVT.git
286
+ """
287
+ def __init__(
288
+ self,
289
+ img_size=224,
290
+ patch_size=4,
291
+ in_chans=3,
292
+ num_classes=1000,
293
+ global_pool='avg',
294
+ embed_dims=(64, 128, 256, 512),
295
+ num_heads=(1, 2, 4, 8),
296
+ mlp_ratios=(4, 4, 4, 4),
297
+ depths=(3, 4, 6, 3),
298
+ sr_ratios=(8, 4, 2, 1),
299
+ wss=None,
300
+ drop_rate=0.,
301
+ pos_drop_rate=0.,
302
+ proj_drop_rate=0.,
303
+ attn_drop_rate=0.,
304
+ drop_path_rate=0.,
305
+ norm_layer=partial(nn.LayerNorm, eps=1e-6),
306
+ block_cls=Block,
307
+ ):
308
+ super().__init__()
309
+ self.num_classes = num_classes
310
+ self.global_pool = global_pool
311
+ self.depths = depths
312
+ self.embed_dims = embed_dims
313
+ self.num_features = self.head_hidden_size = embed_dims[-1]
314
+ self.grad_checkpointing = False
315
+
316
+ img_size = to_2tuple(img_size)
317
+ prev_chs = in_chans
318
+ self.patch_embeds = nn.ModuleList()
319
+ self.pos_drops = nn.ModuleList()
320
+ for i in range(len(depths)):
321
+ self.patch_embeds.append(PatchEmbed(img_size, patch_size, prev_chs, embed_dims[i]))
322
+ self.pos_drops.append(nn.Dropout(p=pos_drop_rate))
323
+ prev_chs = embed_dims[i]
324
+ img_size = tuple(t // patch_size for t in img_size)
325
+ patch_size = 2
326
+
327
+ self.blocks = nn.ModuleList()
328
+ self.feature_info = []
329
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
330
+ cur = 0
331
+ for k in range(len(depths)):
332
+ _block = nn.ModuleList([block_cls(
333
+ dim=embed_dims[k],
334
+ num_heads=num_heads[k],
335
+ mlp_ratio=mlp_ratios[k],
336
+ proj_drop=proj_drop_rate,
337
+ attn_drop=attn_drop_rate,
338
+ drop_path=dpr[cur + i],
339
+ norm_layer=norm_layer,
340
+ sr_ratio=sr_ratios[k],
341
+ ws=1 if wss is None or i % 2 == 1 else wss[k]) for i in range(depths[k])],
342
+ )
343
+ self.blocks.append(_block)
344
+ self.feature_info += [dict(module=f'block.{k}', num_chs=embed_dims[k], reduction=2**(2+k))]
345
+ cur += depths[k]
346
+
347
+ self.pos_block = nn.ModuleList([PosConv(embed_dim, embed_dim) for embed_dim in embed_dims])
348
+
349
+ self.norm = norm_layer(self.num_features)
350
+
351
+ # classification head
352
+ self.head_drop = nn.Dropout(drop_rate)
353
+ self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
354
+
355
+ # init weights
356
+ self.apply(self._init_weights)
357
+
358
+ @torch.jit.ignore
359
+ def no_weight_decay(self):
360
+ return set(['pos_block.' + n for n, p in self.pos_block.named_parameters()])
361
+
362
+ @torch.jit.ignore
363
+ def group_matcher(self, coarse=False):
364
+ matcher = dict(
365
+ stem=r'^patch_embeds.0', # stem and embed
366
+ blocks=[
367
+ (r'^(?:blocks|patch_embeds|pos_block)\.(\d+)', None),
368
+ ('^norm', (99999,))
369
+ ] if coarse else [
370
+ (r'^blocks\.(\d+)\.(\d+)', None),
371
+ (r'^(?:patch_embeds|pos_block)\.(\d+)', (0,)),
372
+ (r'^norm', (99999,))
373
+ ]
374
+ )
375
+ return matcher
376
+
377
+ @torch.jit.ignore
378
+ def set_grad_checkpointing(self, enable=True):
379
+ assert not enable, 'gradient checkpointing not supported'
380
+
381
+ @torch.jit.ignore
382
+ def get_classifier(self) -> nn.Module:
383
+ return self.head
384
+
385
+ def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
386
+ self.num_classes = num_classes
387
+ if global_pool is not None:
388
+ assert global_pool in ('', 'avg')
389
+ self.global_pool = global_pool
390
+ self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
391
+
392
+ def _init_weights(self, m):
393
+ if isinstance(m, nn.Linear):
394
+ trunc_normal_(m.weight, std=.02)
395
+ if isinstance(m, nn.Linear) and m.bias is not None:
396
+ nn.init.constant_(m.bias, 0)
397
+ elif isinstance(m, nn.LayerNorm):
398
+ nn.init.constant_(m.bias, 0)
399
+ nn.init.constant_(m.weight, 1.0)
400
+ elif isinstance(m, nn.Conv2d):
401
+ fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
402
+ fan_out //= m.groups
403
+ m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
404
+ if m.bias is not None:
405
+ m.bias.data.zero_()
406
+
407
+ def forward_intermediates(
408
+ self,
409
+ x: torch.Tensor,
410
+ indices: Optional[Union[int, List[int]]] = None,
411
+ norm: bool = False,
412
+ stop_early: bool = False,
413
+ output_fmt: str = 'NCHW',
414
+ intermediates_only: bool = False,
415
+ ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
416
+ """ Forward features that returns intermediates.
417
+ Args:
418
+ x: Input image tensor
419
+ indices: Take last n blocks if int, all if None, select matching indices if sequence
420
+ norm: Apply norm layer to all intermediates
421
+ stop_early: Stop iterating over blocks when last desired intermediate hit
422
+ output_fmt: Shape of intermediate feature outputs
423
+ intermediates_only: Only return intermediate features
424
+ Returns:
425
+
426
+ """
427
+ assert output_fmt == 'NCHW', 'Output shape for Twins must be NCHW.'
428
+ intermediates = []
429
+ take_indices, max_index = feature_take_indices(len(self.blocks), indices)
430
+
431
+ # FIXME slice block/pos_block if < max
432
+
433
+ # forward pass
434
+ B, _, height, width = x.shape
435
+ for i, (embed, drop, blocks, pos_blk) in enumerate(zip(
436
+ self.patch_embeds, self.pos_drops, self.blocks, self.pos_block)
437
+ ):
438
+ x, size = embed(x)
439
+ x = drop(x)
440
+ for j, blk in enumerate(blocks):
441
+ x = blk(x, size)
442
+ if j == 0:
443
+ x = pos_blk(x, size) # PEG here
444
+
445
+ if i < len(self.depths) - 1:
446
+ x = x.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous()
447
+ if i in take_indices:
448
+ intermediates.append(x)
449
+ else:
450
+ if i in take_indices:
451
+ # only last feature can be normed
452
+ x_feat = self.norm(x) if norm else x
453
+ intermediates.append(x_feat.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous())
454
+
455
+ if intermediates_only:
456
+ return intermediates
457
+
458
+ x = self.norm(x)
459
+
460
+ return x, intermediates
461
+
462
+ def prune_intermediate_layers(
463
+ self,
464
+ indices: Union[int, List[int]] = 1,
465
+ prune_norm: bool = False,
466
+ prune_head: bool = True,
467
+ ):
468
+ """ Prune layers not required for specified intermediates.
469
+ """
470
+ take_indices, max_index = feature_take_indices(len(self.blocks), indices)
471
+ # FIXME add block pruning
472
+ if prune_norm:
473
+ self.norm = nn.Identity()
474
+ if prune_head:
475
+ self.reset_classifier(0, '')
476
+ return take_indices
477
+
478
+ def forward_features(self, x):
479
+ B = x.shape[0]
480
+ for i, (embed, drop, blocks, pos_blk) in enumerate(
481
+ zip(self.patch_embeds, self.pos_drops, self.blocks, self.pos_block)):
482
+ x, size = embed(x)
483
+ x = drop(x)
484
+ for j, blk in enumerate(blocks):
485
+ x = blk(x, size)
486
+ if j == 0:
487
+ x = pos_blk(x, size) # PEG here
488
+ if i < len(self.depths) - 1:
489
+ x = x.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous()
490
+ x = self.norm(x)
491
+ return x
492
+
493
+ def forward_head(self, x, pre_logits: bool = False):
494
+ if self.global_pool == 'avg':
495
+ x = x.mean(dim=1)
496
+ x = self.head_drop(x)
497
+ return x if pre_logits else self.head(x)
498
+
499
+ def forward(self, x):
500
+ x = self.forward_features(x)
501
+ x = self.forward_head(x)
502
+ return x
503
+
504
+
505
+ def _create_twins(variant, pretrained=False, **kwargs):
506
+ out_indices = kwargs.pop('out_indices', 4)
507
+ model = build_model_with_cfg(
508
+ Twins, variant, pretrained,
509
+ feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
510
+ **kwargs,
511
+ )
512
+ return model
513
+
514
+
515
+ def _cfg(url='', **kwargs):
516
+ return {
517
+ 'url': url,
518
+ 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
519
+ 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
520
+ 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
521
+ 'first_conv': 'patch_embeds.0.proj', 'classifier': 'head',
522
+ **kwargs
523
+ }
524
+
525
+
526
+ default_cfgs = generate_default_cfgs({
527
+ 'twins_pcpvt_small.in1k': _cfg(hf_hub_id='timm/'),
528
+ 'twins_pcpvt_base.in1k': _cfg(hf_hub_id='timm/'),
529
+ 'twins_pcpvt_large.in1k': _cfg(hf_hub_id='timm/'),
530
+ 'twins_svt_small.in1k': _cfg(hf_hub_id='timm/'),
531
+ 'twins_svt_base.in1k': _cfg(hf_hub_id='timm/'),
532
+ 'twins_svt_large.in1k': _cfg(hf_hub_id='timm/'),
533
+ })
534
+
535
+
536
+ @register_model
537
+ def twins_pcpvt_small(pretrained=False, **kwargs) -> Twins:
538
+ model_args = dict(
539
+ patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
540
+ depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1])
541
+ return _create_twins('twins_pcpvt_small', pretrained=pretrained, **dict(model_args, **kwargs))
542
+
543
+
544
+ @register_model
545
+ def twins_pcpvt_base(pretrained=False, **kwargs) -> Twins:
546
+ model_args = dict(
547
+ patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
548
+ depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1])
549
+ return _create_twins('twins_pcpvt_base', pretrained=pretrained, **dict(model_args, **kwargs))
550
+
551
+
552
+ @register_model
553
+ def twins_pcpvt_large(pretrained=False, **kwargs) -> Twins:
554
+ model_args = dict(
555
+ patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
556
+ depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1])
557
+ return _create_twins('twins_pcpvt_large', pretrained=pretrained, **dict(model_args, **kwargs))
558
+
559
+
560
+ @register_model
561
+ def twins_svt_small(pretrained=False, **kwargs) -> Twins:
562
+ model_args = dict(
563
+ patch_size=4, embed_dims=[64, 128, 256, 512], num_heads=[2, 4, 8, 16], mlp_ratios=[4, 4, 4, 4],
564
+ depths=[2, 2, 10, 4], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1])
565
+ return _create_twins('twins_svt_small', pretrained=pretrained, **dict(model_args, **kwargs))
566
+
567
+
568
+ @register_model
569
+ def twins_svt_base(pretrained=False, **kwargs) -> Twins:
570
+ model_args = dict(
571
+ patch_size=4, embed_dims=[96, 192, 384, 768], num_heads=[3, 6, 12, 24], mlp_ratios=[4, 4, 4, 4],
572
+ depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1])
573
+ return _create_twins('twins_svt_base', pretrained=pretrained, **dict(model_args, **kwargs))
574
+
575
+
576
+ @register_model
577
+ def twins_svt_large(pretrained=False, **kwargs) -> Twins:
578
+ model_args = dict(
579
+ patch_size=4, embed_dims=[128, 256, 512, 1024], num_heads=[4, 8, 16, 32], mlp_ratios=[4, 4, 4, 4],
580
+ depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1])
581
+ return _create_twins('twins_svt_large', pretrained=pretrained, **dict(model_args, **kwargs))
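The registrations above complete the Twins entry points. As a hedged sanity check (not part of the commit), a minimal usage sketch follows; it assumes this tree is importable as the timm package and that timm.create_model resolves the names registered above:

    import torch
    import timm

    # Build an untrained model; pretrained=True would pull the 'twins_pcpvt_small.in1k' weights per default_cfgs above.
    model = timm.create_model('twins_pcpvt_small', pretrained=False)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))  # _cfg sets fixed_input_size=True at 224x224
    print(logits.shape)  # expected: torch.Size([1, 1000])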
pytorch-image-models/timm/models/vgg.py ADDED
@@ -0,0 +1,298 @@
1
+ """VGG
2
+
3
+ Adapted from https://github.com/pytorch/vision 'vgg.py' (BSD-3-Clause) with a few changes for
4
+ timm functionality.
5
+
6
+ Copyright 2021 Ross Wightman
7
+ """
8
+ from typing import Any, Dict, List, Optional, Union, cast
9
+
10
+ import torch
11
+ import torch.nn as nn
12
+ import torch.nn.functional as F
13
+
14
+ from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
15
+ from timm.layers import ClassifierHead
16
+ from ._builder import build_model_with_cfg
17
+ from ._features_fx import register_notrace_module
18
+ from ._registry import register_model, generate_default_cfgs
19
+
20
+ __all__ = ['VGG']
21
+
22
+
23
+ cfgs: Dict[str, List[Union[str, int]]] = {
24
+ 'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
25
+ 'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
26
+ 'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
27
+ 'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
28
+ }
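+ # NOTE: in the cfgs above, each int is the output channel count of a 3x3 conv (padding=1) and 'M' inserts a 2x2, stride-2 max pool.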
29
+
30
+
31
+ @register_notrace_module # reason: FX can't symbolically trace control flow in forward method
32
+ class ConvMlp(nn.Module):
33
+
34
+ def __init__(
35
+ self,
36
+ in_features=512,
37
+ out_features=4096,
38
+ kernel_size=7,
39
+ mlp_ratio=1.0,
40
+ drop_rate: float = 0.2,
41
+ act_layer: nn.Module = None,
42
+ conv_layer: nn.Module = None,
43
+ ):
44
+ super(ConvMlp, self).__init__()
45
+ self.input_kernel_size = kernel_size
46
+ mid_features = int(out_features * mlp_ratio)
47
+ self.fc1 = conv_layer(in_features, mid_features, kernel_size, bias=True)
48
+ self.act1 = act_layer(True)
49
+ self.drop = nn.Dropout(drop_rate)
50
+ self.fc2 = conv_layer(mid_features, out_features, 1, bias=True)
51
+ self.act2 = act_layer(True)
52
+
53
+ def forward(self, x):
54
+ if x.shape[-2] < self.input_kernel_size or x.shape[-1] < self.input_kernel_size:
55
+ # keep the feature map at least kernel_size x kernel_size (7x7 by default)
56
+ output_size = (max(self.input_kernel_size, x.shape[-2]), max(self.input_kernel_size, x.shape[-1]))
57
+ x = F.adaptive_avg_pool2d(x, output_size)
58
+ x = self.fc1(x)
59
+ x = self.act1(x)
60
+ x = self.drop(x)
61
+ x = self.fc2(x)
62
+ x = self.act2(x)
63
+ return x
64
+
65
+
66
+ class VGG(nn.Module):
67
+
68
+ def __init__(
69
+ self,
70
+ cfg: List[Any],
71
+ num_classes: int = 1000,
72
+ in_chans: int = 3,
73
+ output_stride: int = 32,
74
+ mlp_ratio: float = 1.0,
75
+ act_layer: nn.Module = nn.ReLU,
76
+ conv_layer: nn.Module = nn.Conv2d,
77
+ norm_layer: nn.Module = None,
78
+ global_pool: str = 'avg',
79
+ drop_rate: float = 0.,
80
+ ) -> None:
81
+ super(VGG, self).__init__()
82
+ assert output_stride == 32
83
+ self.num_classes = num_classes
84
+ self.drop_rate = drop_rate
85
+ self.grad_checkpointing = False
86
+ self.use_norm = norm_layer is not None
87
+ self.feature_info = []
88
+
89
+ prev_chs = in_chans
90
+ net_stride = 1
91
+ pool_layer = nn.MaxPool2d
92
+ layers: List[nn.Module] = []
93
+ for v in cfg:
94
+ last_idx = len(layers) - 1
95
+ if v == 'M':
96
+ self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{last_idx}'))
97
+ layers += [pool_layer(kernel_size=2, stride=2)]
98
+ net_stride *= 2
99
+ else:
100
+ v = cast(int, v)
101
+ conv2d = conv_layer(prev_chs, v, kernel_size=3, padding=1)
102
+ if norm_layer is not None:
103
+ layers += [conv2d, norm_layer(v), act_layer(inplace=True)]
104
+ else:
105
+ layers += [conv2d, act_layer(inplace=True)]
106
+ prev_chs = v
107
+ self.features = nn.Sequential(*layers)
108
+ self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{len(layers) - 1}'))
109
+
110
+ self.num_features = prev_chs
111
+ self.head_hidden_size = 4096
112
+ self.pre_logits = ConvMlp(
113
+ prev_chs,
114
+ self.head_hidden_size,
115
+ 7,
116
+ mlp_ratio=mlp_ratio,
117
+ drop_rate=drop_rate,
118
+ act_layer=act_layer,
119
+ conv_layer=conv_layer,
120
+ )
121
+ self.head = ClassifierHead(
122
+ self.head_hidden_size,
123
+ num_classes,
124
+ pool_type=global_pool,
125
+ drop_rate=drop_rate,
126
+ )
127
+
128
+ self._initialize_weights()
129
+
130
+ @torch.jit.ignore
131
+ def group_matcher(self, coarse=False):
132
+ # NOTE: for the bn variants this treats BN layers as separate groups; fixing that properly would take a lot of effort
133
+ return dict(stem=r'^features\.0', blocks=r'^features\.(\d+)')
134
+
135
+ @torch.jit.ignore
136
+ def set_grad_checkpointing(self, enable=True):
137
+ assert not enable, 'gradient checkpointing not supported'
138
+
139
+ @torch.jit.ignore
140
+ def get_classifier(self) -> nn.Module:
141
+ return self.head.fc
142
+
143
+ def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
144
+ self.num_classes = num_classes
145
+ self.head.reset(num_classes, global_pool)
146
+
147
+ def forward_features(self, x: torch.Tensor) -> torch.Tensor:
148
+ x = self.features(x)
149
+ return x
150
+
151
+ def forward_head(self, x: torch.Tensor, pre_logits: bool = False):
152
+ x = self.pre_logits(x)
153
+ return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
154
+
155
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
156
+ x = self.forward_features(x)
157
+ x = self.forward_head(x)
158
+ return x
159
+
160
+ def _initialize_weights(self) -> None:
161
+ for m in self.modules():
162
+ if isinstance(m, nn.Conv2d):
163
+ nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
164
+ if m.bias is not None:
165
+ nn.init.constant_(m.bias, 0)
166
+ elif isinstance(m, nn.BatchNorm2d):
167
+ nn.init.constant_(m.weight, 1)
168
+ nn.init.constant_(m.bias, 0)
169
+ elif isinstance(m, nn.Linear):
170
+ nn.init.normal_(m.weight, 0, 0.01)
171
+ nn.init.constant_(m.bias, 0)
172
+
173
+
174
+ def _filter_fn(state_dict):
175
+ """ convert patch embedding weight from manual patchify + linear proj to conv"""
176
+ out_dict = {}
177
+ for k, v in state_dict.items():
178
+ k_r = k
179
+ k_r = k_r.replace('classifier.0', 'pre_logits.fc1')
180
+ k_r = k_r.replace('classifier.3', 'pre_logits.fc2')
181
+ k_r = k_r.replace('classifier.6', 'head.fc')
182
+ if 'classifier.0.weight' in k:
183
+ v = v.reshape(-1, 512, 7, 7)
184
+ if 'classifier.3.weight' in k:
185
+ v = v.reshape(-1, 4096, 1, 1)
186
+ out_dict[k_r] = v
187
+ return out_dict
188
+
189
+
190
+ def _create_vgg(variant: str, pretrained: bool, **kwargs: Any) -> VGG:
191
+ cfg = variant.split('_')[0]
192
+ # NOTE: VGG is one of few models with stride==1 features w/ 6 out_indices [0..5]
193
+ out_indices = kwargs.pop('out_indices', (0, 1, 2, 3, 4, 5))
194
+ model = build_model_with_cfg(
195
+ VGG,
196
+ variant,
197
+ pretrained,
198
+ model_cfg=cfgs[cfg],
199
+ feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
200
+ pretrained_filter_fn=_filter_fn,
201
+ **kwargs,
202
+ )
203
+ return model
204
+
205
+
206
+ def _cfg(url='', **kwargs):
207
+ return {
208
+ 'url': url,
209
+ 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
210
+ 'crop_pct': 0.875, 'interpolation': 'bilinear',
211
+ 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
212
+ 'first_conv': 'features.0', 'classifier': 'head.fc',
213
+ **kwargs
214
+ }
215
+
216
+
217
+ default_cfgs = generate_default_cfgs({
218
+ 'vgg11.tv_in1k': _cfg(hf_hub_id='timm/'),
219
+ 'vgg13.tv_in1k': _cfg(hf_hub_id='timm/'),
220
+ 'vgg16.tv_in1k': _cfg(hf_hub_id='timm/'),
221
+ 'vgg19.tv_in1k': _cfg(hf_hub_id='timm/'),
222
+ 'vgg11_bn.tv_in1k': _cfg(hf_hub_id='timm/'),
223
+ 'vgg13_bn.tv_in1k': _cfg(hf_hub_id='timm/'),
224
+ 'vgg16_bn.tv_in1k': _cfg(hf_hub_id='timm/'),
225
+ 'vgg19_bn.tv_in1k': _cfg(hf_hub_id='timm/'),
226
+ })
227
+
228
+
229
+ @register_model
230
+ def vgg11(pretrained: bool = False, **kwargs: Any) -> VGG:
231
+ r"""VGG 11-layer model (configuration "A") from
232
+ `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
233
+ """
234
+ model_args = dict(**kwargs)
235
+ return _create_vgg('vgg11', pretrained=pretrained, **model_args)
236
+
237
+
238
+ @register_model
239
+ def vgg11_bn(pretrained: bool = False, **kwargs: Any) -> VGG:
240
+ r"""VGG 11-layer model (configuration "A") with batch normalization
241
+ `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
242
+ """
243
+ model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs)
244
+ return _create_vgg('vgg11_bn', pretrained=pretrained, **model_args)
245
+
246
+
247
+ @register_model
248
+ def vgg13(pretrained: bool = False, **kwargs: Any) -> VGG:
249
+ r"""VGG 13-layer model (configuration "B")
250
+ `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
251
+ """
252
+ model_args = dict(**kwargs)
253
+ return _create_vgg('vgg13', pretrained=pretrained, **model_args)
254
+
255
+
256
+ @register_model
257
+ def vgg13_bn(pretrained: bool = False, **kwargs: Any) -> VGG:
258
+ r"""VGG 13-layer model (configuration "B") with batch normalization
259
+ `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
260
+ """
261
+ model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs)
262
+ return _create_vgg('vgg13_bn', pretrained=pretrained, **model_args)
263
+
264
+
265
+ @register_model
266
+ def vgg16(pretrained: bool = False, **kwargs: Any) -> VGG:
267
+ r"""VGG 16-layer model (configuration "D")
268
+ `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
269
+ """
270
+ model_args = dict(**kwargs)
271
+ return _create_vgg('vgg16', pretrained=pretrained, **model_args)
272
+
273
+
274
+ @register_model
275
+ def vgg16_bn(pretrained: bool = False, **kwargs: Any) -> VGG:
276
+ r"""VGG 16-layer model (configuration "D") with batch normalization
277
+ `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
278
+ """
279
+ model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs)
280
+ return _create_vgg('vgg16_bn', pretrained=pretrained, **model_args)
281
+
282
+
283
+ @register_model
284
+ def vgg19(pretrained: bool = False, **kwargs: Any) -> VGG:
285
+ r"""VGG 19-layer model (configuration "E")
286
+ `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
287
+ """
288
+ model_args = dict(**kwargs)
289
+ return _create_vgg('vgg19', pretrained=pretrained, **model_args)
290
+
291
+
292
+ @register_model
293
+ def vgg19_bn(pretrained: bool = False, **kwargs: Any) -> VGG:
294
+ r"""VGG 19-layer model (configuration 'E') with batch normalization
295
+ `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._
296
+ """
297
+ model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs)
298
+ return _create_vgg('vgg19_bn', pretrained=pretrained, **model_args)
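Before the next file, a hedged usage sketch of the feature-extraction path that _create_vgg wires up above (assuming timm.create_model with features_only=True and the default six out_indices; backbone.feature_info is timm's FeatureInfo helper):

    import torch
    import timm

    # features_only returns one tensor per out_index; VGG's first stage is at stride 1, as noted in _create_vgg.
    backbone = timm.create_model('vgg16_bn', pretrained=False, features_only=True)
    feats = backbone(torch.randn(1, 3, 224, 224))
    for f, red in zip(feats, backbone.feature_info.reduction()):
        print(tuple(f.shape), 'reduction', red)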
pytorch-image-models/timm/models/visformer.py ADDED
@@ -0,0 +1,549 @@
1
+ """ Visformer
2
+
3
+ Paper: Visformer: The Vision-friendly Transformer - https://arxiv.org/abs/2104.12533
4
+
5
+ From original at https://github.com/danczs/Visformer
6
+
7
+ Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
8
+ """
9
+
10
+ import torch
11
+ import torch.nn as nn
12
+
13
+ from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
14
+ from timm.layers import to_2tuple, trunc_normal_, DropPath, PatchEmbed, LayerNorm2d, create_classifier, use_fused_attn
15
+ from ._builder import build_model_with_cfg
16
+ from ._manipulate import checkpoint_seq
17
+ from ._registry import register_model, generate_default_cfgs
18
+
19
+ __all__ = ['Visformer']
20
+
21
+
22
+ class SpatialMlp(nn.Module):
23
+ def __init__(
24
+ self,
25
+ in_features,
26
+ hidden_features=None,
27
+ out_features=None,
28
+ act_layer=nn.GELU,
29
+ drop=0.,
30
+ group=8,
31
+ spatial_conv=False,
32
+ ):
33
+ super().__init__()
34
+ out_features = out_features or in_features
35
+ hidden_features = hidden_features or in_features
36
+ drop_probs = to_2tuple(drop)
37
+
38
+ self.in_features = in_features
39
+ self.out_features = out_features
40
+ self.spatial_conv = spatial_conv
41
+ if self.spatial_conv:
42
+ if group < 2: # net setting
43
+ hidden_features = in_features * 5 // 6
44
+ else:
45
+ hidden_features = in_features * 2
46
+ self.hidden_features = hidden_features
47
+ self.group = group
48
+ self.conv1 = nn.Conv2d(in_features, hidden_features, 1, stride=1, padding=0, bias=False)
49
+ self.act1 = act_layer()
50
+ self.drop1 = nn.Dropout(drop_probs[0])
51
+ if self.spatial_conv:
52
+ self.conv2 = nn.Conv2d(
53
+ hidden_features, hidden_features, 3, stride=1, padding=1, groups=self.group, bias=False)
54
+ self.act2 = act_layer()
55
+ else:
56
+ self.conv2 = None
57
+ self.act2 = None
58
+ self.conv3 = nn.Conv2d(hidden_features, out_features, 1, stride=1, padding=0, bias=False)
59
+ self.drop3 = nn.Dropout(drop_probs[1])
60
+
61
+ def forward(self, x):
62
+ x = self.conv1(x)
63
+ x = self.act1(x)
64
+ x = self.drop1(x)
65
+ if self.conv2 is not None:
66
+ x = self.conv2(x)
67
+ x = self.act2(x)
68
+ x = self.conv3(x)
69
+ x = self.drop3(x)
70
+ return x
71
+
72
+
73
+ class Attention(nn.Module):
74
+ fused_attn: torch.jit.Final[bool]
75
+
76
+ def __init__(self, dim, num_heads=8, head_dim_ratio=1., attn_drop=0., proj_drop=0.):
77
+ super().__init__()
78
+ self.dim = dim
79
+ self.num_heads = num_heads
80
+ head_dim = round(dim // num_heads * head_dim_ratio)
81
+ self.head_dim = head_dim
82
+ self.scale = head_dim ** -0.5
83
+ self.fused_attn = use_fused_attn(experimental=True)
84
+
85
+ self.qkv = nn.Conv2d(dim, head_dim * num_heads * 3, 1, stride=1, padding=0, bias=False)
86
+ self.attn_drop = nn.Dropout(attn_drop)
87
+ self.proj = nn.Conv2d(self.head_dim * self.num_heads, dim, 1, stride=1, padding=0, bias=False)
88
+ self.proj_drop = nn.Dropout(proj_drop)
89
+
90
+ def forward(self, x):
91
+ B, C, H, W = x.shape
92
+ x = self.qkv(x).reshape(B, 3, self.num_heads, self.head_dim, -1).permute(1, 0, 2, 4, 3)
93
+ q, k, v = x.unbind(0)
94
+
95
+ if self.fused_attn:
96
+ x = torch.nn.functional.scaled_dot_product_attention(
97
+ q.contiguous(), k.contiguous(), v.contiguous(),
98
+ dropout_p=self.attn_drop.p if self.training else 0.,
99
+ )
100
+ else:
101
+ attn = (q @ k.transpose(-2, -1)) * self.scale
102
+ attn = attn.softmax(dim=-1)
103
+ attn = self.attn_drop(attn)
104
+ x = attn @ v
105
+
106
+ x = x.permute(0, 1, 3, 2).reshape(B, -1, H, W)
107
+ x = self.proj(x)
108
+ x = self.proj_drop(x)
109
+ return x
110
+
111
+
112
+ class Block(nn.Module):
113
+ def __init__(
114
+ self,
115
+ dim,
116
+ num_heads,
117
+ head_dim_ratio=1.,
118
+ mlp_ratio=4.,
119
+ proj_drop=0.,
120
+ attn_drop=0.,
121
+ drop_path=0.,
122
+ act_layer=nn.GELU,
123
+ norm_layer=LayerNorm2d,
124
+ group=8,
125
+ attn_disabled=False,
126
+ spatial_conv=False,
127
+ ):
128
+ super().__init__()
129
+ self.spatial_conv = spatial_conv
130
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
131
+ if attn_disabled:
132
+ self.norm1 = None
133
+ self.attn = None
134
+ else:
135
+ self.norm1 = norm_layer(dim)
136
+ self.attn = Attention(
137
+ dim,
138
+ num_heads=num_heads,
139
+ head_dim_ratio=head_dim_ratio,
140
+ attn_drop=attn_drop,
141
+ proj_drop=proj_drop,
142
+ )
143
+
144
+ self.norm2 = norm_layer(dim)
145
+ self.mlp = SpatialMlp(
146
+ in_features=dim,
147
+ hidden_features=int(dim * mlp_ratio),
148
+ act_layer=act_layer,
149
+ drop=proj_drop,
150
+ group=group,
151
+ spatial_conv=spatial_conv,
152
+ )
153
+
154
+ def forward(self, x):
155
+ if self.attn is not None:
156
+ x = x + self.drop_path(self.attn(self.norm1(x)))
157
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
158
+ return x
159
+
160
+
161
+ class Visformer(nn.Module):
162
+ def __init__(
163
+ self,
164
+ img_size=224,
165
+ patch_size=16,
166
+ in_chans=3,
167
+ num_classes=1000,
168
+ init_channels=32,
169
+ embed_dim=384,
170
+ depth=12,
171
+ num_heads=6,
172
+ mlp_ratio=4.,
173
+ drop_rate=0.,
174
+ pos_drop_rate=0.,
175
+ proj_drop_rate=0.,
176
+ attn_drop_rate=0.,
177
+ drop_path_rate=0.,
178
+ norm_layer=LayerNorm2d,
179
+ attn_stage='111',
180
+ use_pos_embed=True,
181
+ spatial_conv='111',
182
+ vit_stem=False,
183
+ group=8,
184
+ global_pool='avg',
185
+ conv_init=False,
186
+ embed_norm=None,
187
+ ):
188
+ super().__init__()
189
+ img_size = to_2tuple(img_size)
190
+ self.num_classes = num_classes
191
+ self.embed_dim = embed_dim
192
+ self.init_channels = init_channels
193
+ self.img_size = img_size
194
+ self.vit_stem = vit_stem
195
+ self.conv_init = conv_init
196
+ if isinstance(depth, (list, tuple)):
197
+ self.stage_num1, self.stage_num2, self.stage_num3 = depth
198
+ depth = sum(depth)
199
+ else:
200
+ self.stage_num1 = self.stage_num3 = depth // 3
201
+ self.stage_num2 = depth - self.stage_num1 - self.stage_num3
202
+ self.use_pos_embed = use_pos_embed
203
+ self.grad_checkpointing = False
204
+
205
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
206
+ # stage 1
207
+ if self.vit_stem:
208
+ self.stem = None
209
+ self.patch_embed1 = PatchEmbed(
210
+ img_size=img_size,
211
+ patch_size=patch_size,
212
+ in_chans=in_chans,
213
+ embed_dim=embed_dim,
214
+ norm_layer=embed_norm,
215
+ flatten=False,
216
+ )
217
+ img_size = [x // patch_size for x in img_size]
218
+ else:
219
+ if self.init_channels is None:
220
+ self.stem = None
221
+ self.patch_embed1 = PatchEmbed(
222
+ img_size=img_size,
223
+ patch_size=patch_size // 2,
224
+ in_chans=in_chans,
225
+ embed_dim=embed_dim // 2,
226
+ norm_layer=embed_norm,
227
+ flatten=False,
228
+ )
229
+ img_size = [x // (patch_size // 2) for x in img_size]
230
+ else:
231
+ self.stem = nn.Sequential(
232
+ nn.Conv2d(in_chans, self.init_channels, 7, stride=2, padding=3, bias=False),
233
+ nn.BatchNorm2d(self.init_channels),
234
+ nn.ReLU(inplace=True)
235
+ )
236
+ img_size = [x // 2 for x in img_size]
237
+ self.patch_embed1 = PatchEmbed(
238
+ img_size=img_size,
239
+ patch_size=patch_size // 4,
240
+ in_chans=self.init_channels,
241
+ embed_dim=embed_dim // 2,
242
+ norm_layer=embed_norm,
243
+ flatten=False,
244
+ )
245
+ img_size = [x // (patch_size // 4) for x in img_size]
246
+
247
+ if self.use_pos_embed:
248
+ if self.vit_stem:
249
+ self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim, *img_size))
250
+ else:
251
+ self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim//2, *img_size))
252
+ self.pos_drop = nn.Dropout(p=pos_drop_rate)
253
+ else:
254
+ self.pos_embed1 = None
255
+
256
+ self.stage1 = nn.Sequential(*[
257
+ Block(
258
+ dim=embed_dim//2,
259
+ num_heads=num_heads,
260
+ head_dim_ratio=0.5,
261
+ mlp_ratio=mlp_ratio,
262
+ proj_drop=proj_drop_rate,
263
+ attn_drop=attn_drop_rate,
264
+ drop_path=dpr[i],
265
+ norm_layer=norm_layer,
266
+ group=group,
267
+ attn_disabled=(attn_stage[0] == '0'),
268
+ spatial_conv=(spatial_conv[0] == '1'),
269
+ )
270
+ for i in range(self.stage_num1)
271
+ ])
272
+
273
+ # stage2
274
+ if not self.vit_stem:
275
+ self.patch_embed2 = PatchEmbed(
276
+ img_size=img_size,
277
+ patch_size=patch_size // 8,
278
+ in_chans=embed_dim // 2,
279
+ embed_dim=embed_dim,
280
+ norm_layer=embed_norm,
281
+ flatten=False,
282
+ )
283
+ img_size = [x // (patch_size // 8) for x in img_size]
284
+ if self.use_pos_embed:
285
+ self.pos_embed2 = nn.Parameter(torch.zeros(1, embed_dim, *img_size))
286
+ else:
287
+ self.pos_embed2 = None
288
+ else:
289
+ self.patch_embed2 = None
290
+ self.stage2 = nn.Sequential(*[
291
+ Block(
292
+ dim=embed_dim,
293
+ num_heads=num_heads,
294
+ head_dim_ratio=1.0,
295
+ mlp_ratio=mlp_ratio,
296
+ proj_drop=proj_drop_rate,
297
+ attn_drop=attn_drop_rate,
298
+ drop_path=dpr[i],
299
+ norm_layer=norm_layer,
300
+ group=group,
301
+ attn_disabled=(attn_stage[1] == '0'),
302
+ spatial_conv=(spatial_conv[1] == '1'),
303
+ )
304
+ for i in range(self.stage_num1, self.stage_num1+self.stage_num2)
305
+ ])
306
+
307
+ # stage 3
308
+ if not self.vit_stem:
309
+ self.patch_embed3 = PatchEmbed(
310
+ img_size=img_size,
311
+ patch_size=patch_size // 8,
312
+ in_chans=embed_dim,
313
+ embed_dim=embed_dim * 2,
314
+ norm_layer=embed_norm,
315
+ flatten=False,
316
+ )
317
+ img_size = [x // (patch_size // 8) for x in img_size]
318
+ if self.use_pos_embed:
319
+ self.pos_embed3 = nn.Parameter(torch.zeros(1, embed_dim*2, *img_size))
320
+ else:
321
+ self.pos_embed3 = None
322
+ else:
323
+ self.patch_embed3 = None
324
+ self.stage3 = nn.Sequential(*[
325
+ Block(
326
+ dim=embed_dim * 2,
327
+ num_heads=num_heads,
328
+ head_dim_ratio=1.0,
329
+ mlp_ratio=mlp_ratio,
330
+ proj_drop=proj_drop_rate,
331
+ attn_drop=attn_drop_rate,
332
+ drop_path=dpr[i],
333
+ norm_layer=norm_layer,
334
+ group=group,
335
+ attn_disabled=(attn_stage[2] == '0'),
336
+ spatial_conv=(spatial_conv[2] == '1'),
337
+ )
338
+ for i in range(self.stage_num1+self.stage_num2, depth)
339
+ ])
340
+
341
+ self.num_features = self.head_hidden_size = embed_dim if self.vit_stem else embed_dim * 2
342
+ self.norm = norm_layer(self.num_features)
343
+
344
+ # head
345
+ global_pool, head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
346
+ self.global_pool = global_pool
347
+ self.head_drop = nn.Dropout(drop_rate)
348
+ self.head = head
349
+
350
+ # weights init
351
+ if self.use_pos_embed:
352
+ trunc_normal_(self.pos_embed1, std=0.02)
353
+ if not self.vit_stem:
354
+ trunc_normal_(self.pos_embed2, std=0.02)
355
+ trunc_normal_(self.pos_embed3, std=0.02)
356
+ self.apply(self._init_weights)
357
+
358
+ def _init_weights(self, m):
359
+ if isinstance(m, nn.Linear):
360
+ trunc_normal_(m.weight, std=0.02)
361
+ if m.bias is not None:
362
+ nn.init.constant_(m.bias, 0)
363
+ elif isinstance(m, nn.Conv2d):
364
+ if self.conv_init:
365
+ nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
366
+ else:
367
+ trunc_normal_(m.weight, std=0.02)
368
+ if m.bias is not None:
369
+ nn.init.constant_(m.bias, 0.)
370
+
371
+ @torch.jit.ignore
372
+ def group_matcher(self, coarse=False):
373
+ return dict(
374
+ stem=r'^patch_embed1|pos_embed1|stem', # stem and embed
375
+ blocks=[
376
+ (r'^stage(\d+)\.(\d+)', None),  # same grouping pattern whether coarse or not
377
+ (r'^(?:patch_embed|pos_embed)(\d+)', (0,)),
378
+ (r'^norm', (99999,))
379
+ ]
380
+ )
381
+
382
+ @torch.jit.ignore
383
+ def set_grad_checkpointing(self, enable=True):
384
+ self.grad_checkpointing = enable
385
+
386
+ @torch.jit.ignore
387
+ def get_classifier(self) -> nn.Module:
388
+ return self.head
389
+
390
+ def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
391
+ self.num_classes = num_classes
392
+ self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
393
+
394
+ def forward_features(self, x):
395
+ if self.stem is not None:
396
+ x = self.stem(x)
397
+
398
+ # stage 1
399
+ x = self.patch_embed1(x)
400
+ if self.pos_embed1 is not None:
401
+ x = self.pos_drop(x + self.pos_embed1)
402
+ if self.grad_checkpointing and not torch.jit.is_scripting():
403
+ x = checkpoint_seq(self.stage1, x)
404
+ else:
405
+ x = self.stage1(x)
406
+
407
+ # stage 2
408
+ if self.patch_embed2 is not None:
409
+ x = self.patch_embed2(x)
410
+ if self.pos_embed2 is not None:
411
+ x = self.pos_drop(x + self.pos_embed2)
412
+ if self.grad_checkpointing and not torch.jit.is_scripting():
413
+ x = checkpoint_seq(self.stage2, x)
414
+ else:
415
+ x = self.stage2(x)
416
+
417
+ # stage3
418
+ if self.patch_embed3 is not None:
419
+ x = self.patch_embed3(x)
420
+ if self.pos_embed3 is not None:
421
+ x = self.pos_drop(x + self.pos_embed3)
422
+ if self.grad_checkpointing and not torch.jit.is_scripting():
423
+ x = checkpoint_seq(self.stage3, x)
424
+ else:
425
+ x = self.stage3(x)
426
+
427
+ x = self.norm(x)
428
+ return x
429
+
430
+ def forward_head(self, x, pre_logits: bool = False):
431
+ x = self.global_pool(x)
432
+ x = self.head_drop(x)
433
+ return x if pre_logits else self.head(x)
434
+
435
+ def forward(self, x):
436
+ x = self.forward_features(x)
437
+ x = self.forward_head(x)
438
+ return x
439
+
440
+
441
+ def _create_visformer(variant, pretrained=False, default_cfg=None, **kwargs):
442
+ if kwargs.get('features_only', None):
443
+ raise RuntimeError('features_only not implemented for Visformer models.')
444
+ model = build_model_with_cfg(Visformer, variant, pretrained, **kwargs)
445
+ return model
446
+
447
+
448
+ def _cfg(url='', **kwargs):
449
+ return {
450
+ 'url': url,
451
+ 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
452
+ 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
453
+ 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
454
+ 'first_conv': 'stem.0', 'classifier': 'head',
455
+ **kwargs
456
+ }
457
+
458
+
459
+ default_cfgs = generate_default_cfgs({
460
+ 'visformer_tiny.in1k': _cfg(hf_hub_id='timm/'),
461
+ 'visformer_small.in1k': _cfg(hf_hub_id='timm/'),
462
+ })
463
+
464
+
465
+ @register_model
466
+ def visformer_tiny(pretrained=False, **kwargs) -> Visformer:
467
+ model_cfg = dict(
468
+ init_channels=16, embed_dim=192, depth=(7, 4, 4), num_heads=3, mlp_ratio=4., group=8,
469
+ attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True,
470
+ embed_norm=nn.BatchNorm2d)
471
+ model = _create_visformer('visformer_tiny', pretrained=pretrained, **dict(model_cfg, **kwargs))
472
+ return model
473
+
474
+
475
+ @register_model
476
+ def visformer_small(pretrained=False, **kwargs) -> Visformer:
477
+ model_cfg = dict(
478
+ init_channels=32, embed_dim=384, depth=(7, 4, 4), num_heads=6, mlp_ratio=4., group=8,
479
+ attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True,
480
+ embed_norm=nn.BatchNorm2d)
481
+ model = _create_visformer('visformer_small', pretrained=pretrained, **dict(model_cfg, **kwargs))
482
+ return model
483
+
484
+
485
+ # @register_model
486
+ # def visformer_net1(pretrained=False, **kwargs):
487
+ # model = Visformer(
488
+ # init_channels=None, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111',
489
+ # spatial_conv='000', vit_stem=True, conv_init=True, **kwargs)
490
+ # model.default_cfg = _cfg()
491
+ # return model
492
+ #
493
+ #
494
+ # @register_model
495
+ # def visformer_net2(pretrained=False, **kwargs):
496
+ # model = Visformer(
497
+ # init_channels=32, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111',
498
+ # spatial_conv='000', vit_stem=False, conv_init=True, **kwargs)
499
+ # model.default_cfg = _cfg()
500
+ # return model
501
+ #
502
+ #
503
+ # @register_model
504
+ # def visformer_net3(pretrained=False, **kwargs):
505
+ # model = Visformer(
506
+ # init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111',
507
+ # spatial_conv='000', vit_stem=False, conv_init=True, **kwargs)
508
+ # model.default_cfg = _cfg()
509
+ # return model
510
+ #
511
+ #
512
+ # @register_model
513
+ # def visformer_net4(pretrained=False, **kwargs):
514
+ # model = Visformer(
515
+ # init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111',
516
+ # spatial_conv='000', vit_stem=False, conv_init=True, **kwargs)
517
+ # model.default_cfg = _cfg()
518
+ # return model
519
+ #
520
+ #
521
+ # @register_model
522
+ # def visformer_net5(pretrained=False, **kwargs):
523
+ # model = Visformer(
524
+ # init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111',
525
+ # spatial_conv='111', vit_stem=False, conv_init=True, **kwargs)
526
+ # model.default_cfg = _cfg()
527
+ # return model
528
+ #
529
+ #
530
+ # @register_model
531
+ # def visformer_net6(pretrained=False, **kwargs):
532
+ # model = Visformer(
533
+ # init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111',
534
+ # pos_embed=False, spatial_conv='111', conv_init=True, **kwargs)
535
+ # model.default_cfg = _cfg()
536
+ # return model
537
+ #
538
+ #
539
+ # @register_model
540
+ # def visformer_net7(pretrained=False, **kwargs):
541
+ # model = Visformer(
542
+ # init_channels=32, embed_dim=384, depth=(6, 7, 7), num_heads=6, group=1, attn_stage='000',
543
+ # pos_embed=False, spatial_conv='111', conv_init=True, **kwargs)
544
+ # model.default_cfg = _cfg()
545
+ # return model
546
+
547
+
548
+
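To round out this file, a minimal, assumption-laden usage sketch for the Visformer entry points registered above (not part of the commit; note that _create_visformer rejects features_only):

    import torch
    import timm

    # Classification-only usage; features_only=True would raise RuntimeError per _create_visformer above.
    model = timm.create_model('visformer_small', pretrained=False)
    model.eval()
    with torch.no_grad():
        out = model(torch.randn(1, 3, 224, 224))
    print(out.shape)  # expected: torch.Size([1, 1000])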
549
+