Upload 103 files
This view is limited to 50 files because it contains too many changes.
- models/__init__.py +24 -0
- models/__pycache__/__init__.cpython-310.pyc +0 -0
- models/__pycache__/__init__.cpython-37.pyc +0 -0
- models/backbone/__init__.py +13 -0
- models/backbone/__pycache__/__init__.cpython-310.pyc +0 -0
- models/backbone/__pycache__/__init__.cpython-37.pyc +0 -0
- models/backbone/backbone_2d/__init__.py +0 -0
- models/backbone/backbone_2d/__pycache__/__init__.cpython-310.pyc +0 -0
- models/backbone/backbone_2d/__pycache__/__init__.cpython-37.pyc +0 -0
- models/backbone/backbone_2d/__pycache__/backbone_2d.cpython-310.pyc +0 -0
- models/backbone/backbone_2d/__pycache__/backbone_2d.cpython-37.pyc +0 -0
- models/backbone/backbone_2d/backbone_2d.py +26 -0
- models/backbone/backbone_2d/cnn_2d/__init__.py +18 -0
- models/backbone/backbone_2d/cnn_2d/__pycache__/__init__.cpython-310.pyc +0 -0
- models/backbone/backbone_2d/cnn_2d/__pycache__/__init__.cpython-37.pyc +0 -0
- models/backbone/backbone_2d/cnn_2d/yolo_free/__init__.py +0 -0
- models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/__init__.cpython-310.pyc +0 -0
- models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/__init__.cpython-37.pyc +0 -0
- models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free.cpython-310.pyc +0 -0
- models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free.cpython-37.pyc +0 -0
- models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free_backbone.cpython-310.pyc +0 -0
- models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free_backbone.cpython-37.pyc +0 -0
- models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free_basic.cpython-310.pyc +0 -0
- models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free_basic.cpython-37.pyc +0 -0
- models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free_fpn.cpython-310.pyc +0 -0
- models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free_fpn.cpython-37.pyc +0 -0
- models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free_head.cpython-310.pyc +0 -0
- models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free_head.cpython-37.pyc +0 -0
- models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free_neck.cpython-310.pyc +0 -0
- models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free_neck.cpython-37.pyc +0 -0
- models/backbone/backbone_2d/cnn_2d/yolo_free/yolo_free.py +222 -0
- models/backbone/backbone_2d/cnn_2d/yolo_free/yolo_free_backbone.py +445 -0
- models/backbone/backbone_2d/cnn_2d/yolo_free/yolo_free_basic.py +164 -0
- models/backbone/backbone_2d/cnn_2d/yolo_free/yolo_free_fpn.py +252 -0
- models/backbone/backbone_2d/cnn_2d/yolo_free/yolo_free_head.py +51 -0
- models/backbone/backbone_2d/cnn_2d/yolo_free/yolo_free_neck.py +164 -0
- models/backbone/backbone_3d/__init__.py +0 -0
- models/backbone/backbone_3d/__pycache__/__init__.cpython-310.pyc +0 -0
- models/backbone/backbone_3d/__pycache__/__init__.cpython-37.pyc +0 -0
- models/backbone/backbone_3d/__pycache__/backbone_3d.cpython-310.pyc +0 -0
- models/backbone/backbone_3d/__pycache__/backbone_3d.cpython-37.pyc +0 -0
- models/backbone/backbone_3d/backbone_3d.py +68 -0
- models/backbone/backbone_3d/cnn_3d/__init__.py +30 -0
- models/backbone/backbone_3d/cnn_3d/__pycache__/__init__.cpython-310.pyc +0 -0
- models/backbone/backbone_3d/cnn_3d/__pycache__/__init__.cpython-37.pyc +0 -0
- models/backbone/backbone_3d/cnn_3d/__pycache__/resnet.cpython-310.pyc +0 -0
- models/backbone/backbone_3d/cnn_3d/__pycache__/resnet.cpython-37.pyc +0 -0
- models/backbone/backbone_3d/cnn_3d/__pycache__/resnext.cpython-310.pyc +0 -0
- models/backbone/backbone_3d/cnn_3d/__pycache__/resnext.cpython-37.pyc +0 -0
- models/backbone/backbone_3d/cnn_3d/__pycache__/shufflnetv2.cpython-310.pyc +0 -0
models/__init__.py
ADDED
@@ -0,0 +1,24 @@
from .yowo.build import build_yowo


def build_model(args,
                d_cfg,
                m_cfg,
                device,
                num_classes=3,
                trainable=False,
                resume=None):
    # build action detector
    if 'yowo_v2_' in args.version:
        model, criterion = build_yowo(
            args=args,
            d_cfg=d_cfg,
            m_cfg=m_cfg,
            device=device,
            num_classes=num_classes,
            trainable=trainable,
            resume=resume
            )

    return model, criterion
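For orientation, a minimal sketch of how this entry point might be called; it is not part of the upload, and the `args` fields and empty config dicts below are placeholders (only `version` is read by `build_model` itself; everything else is passed through to `build_yowo`, whose expectations are outside this 50-file view):

from argparse import Namespace
import torch

from models import build_model

# hypothetical call; d_cfg / m_cfg would come from the repo's config files
args = Namespace(version='yowo_v2_tiny')
d_cfg, m_cfg = {}, {}
device = torch.device('cpu')

model, criterion = build_model(args, d_cfg, m_cfg, device,
                               num_classes=3, trainable=True)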
models/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (503 Bytes).

models/__pycache__/__init__.cpython-37.pyc
ADDED
Binary file (483 Bytes).
models/backbone/__init__.py
ADDED
@@ -0,0 +1,13 @@
from .backbone_2d.backbone_2d import Backbone2D
from .backbone_3d.backbone_3d import Backbone3D


def build_backbone_2d(cfg, pretrained=False):
    backbone = Backbone2D(cfg, pretrained)
    return backbone, backbone.feat_dims


def build_backbone_3d(cfg, pretrained=False):
    backbone = Backbone3D(cfg, pretrained)
    return backbone, backbone.feat_dim
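A minimal usage sketch (not part of the upload), assuming the repo root is on the Python path and a cfg dict with the 'backbone_2d' key that build_2d_cnn reads further down in this diff; the 3D path would need the corresponding backbone_3d keys, which fall outside the 50-file view:

import torch
from models.backbone import build_backbone_2d

cfg = {'backbone_2d': 'yolo_free_nano'}                 # key read by build_2d_cnn below
backbone_2d, feat_dims = build_backbone_2d(cfg, pretrained=False)
print(feat_dims)                                        # [64, 64, 64]: the nano head_dim, repeated per pyramid level
cls_feats, reg_feats = backbone_2d(torch.randn(1, 3, 64, 64))   # per-level cls/reg features from the FreeYOLO heads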
models/backbone/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (550 Bytes).

models/backbone/__pycache__/__init__.cpython-37.pyc
ADDED
Binary file (573 Bytes).
models/backbone/backbone_2d/__init__.py
ADDED
File without changes

models/backbone/backbone_2d/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (151 Bytes).

models/backbone/backbone_2d/__pycache__/__init__.cpython-37.pyc
ADDED
Binary file (145 Bytes).

models/backbone/backbone_2d/__pycache__/backbone_2d.cpython-310.pyc
ADDED
Binary file (1.09 kB).

models/backbone/backbone_2d/__pycache__/backbone_2d.cpython-37.pyc
ADDED
Binary file (1.07 kB).
models/backbone/backbone_2d/backbone_2d.py
ADDED
@@ -0,0 +1,26 @@
import torch.nn as nn
from .cnn_2d import build_2d_cnn


class Backbone2D(nn.Module):
    def __init__(self, cfg, pretrained=False):
        super().__init__()
        self.cfg = cfg

        self.backbone, self.feat_dims = build_2d_cnn(cfg, pretrained)


    def forward(self, x):
        """
        Input:
            x: (Tensor) -> [B, C, H, W]
        Output:
            y: (List) -> [
                (Tensor) -> [B, C1, H1, W1],
                (Tensor) -> [B, C2, H2, W2],
                (Tensor) -> [B, C3, H3, W3]
                ]
        """
        feat = self.backbone(x)

        return feat
models/backbone/backbone_2d/cnn_2d/__init__.py
ADDED
@@ -0,0 +1,18 @@
# import 2D backbone
from .yolo_free.yolo_free import build_yolo_free


def build_2d_cnn(cfg, pretrained=False):
    print('==============================')
    print('2D Backbone: {}'.format(cfg['backbone_2d'].upper()))
    print('--pretrained: {}'.format(pretrained))

    if cfg['backbone_2d'] in ['yolo_free_nano', 'yolo_free_tiny',
                              'yolo_free_large', 'yolo_free_huge']:
        model, feat_dims = build_yolo_free(cfg['backbone_2d'], pretrained)

    else:
        print('Unknown 2D Backbone ...')
        exit()

    return model, feat_dims
models/backbone/backbone_2d/cnn_2d/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (680 Bytes).

models/backbone/backbone_2d/cnn_2d/__pycache__/__init__.cpython-37.pyc
ADDED
Binary file (662 Bytes).
models/backbone/backbone_2d/cnn_2d/yolo_free/__init__.py
ADDED
File without changes

models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (168 Bytes).

models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/__init__.cpython-37.pyc
ADDED
Binary file (162 Bytes).

models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free.cpython-310.pyc
ADDED
Binary file (4.41 kB).

models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free.cpython-37.pyc
ADDED
Binary file (4.1 kB).

models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free_backbone.cpython-310.pyc
ADDED
Binary file (11.2 kB).

models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free_backbone.cpython-37.pyc
ADDED
Binary file (11.5 kB).

models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free_basic.cpython-310.pyc
ADDED
Binary file (4.52 kB).

models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free_basic.cpython-37.pyc
ADDED
Binary file (4.45 kB).

models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free_fpn.cpython-310.pyc
ADDED
Binary file (5.93 kB).

models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free_fpn.cpython-37.pyc
ADDED
Binary file (6.03 kB).

models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free_head.cpython-310.pyc
ADDED
Binary file (1.71 kB).

models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free_head.cpython-37.pyc
ADDED
Binary file (1.8 kB).

models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free_neck.cpython-310.pyc
ADDED
Binary file (4.33 kB).

models/backbone/backbone_2d/cnn_2d/yolo_free/__pycache__/yolo_free_neck.cpython-37.pyc
ADDED
Binary file (4.49 kB).
models/backbone/backbone_2d/cnn_2d/yolo_free/yolo_free.py
ADDED
@@ -0,0 +1,222 @@
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.hub import load_state_dict_from_url

try:
    from .yolo_free_backbone import build_backbone
    from .yolo_free_neck import build_neck
    from .yolo_free_fpn import build_fpn
    from .yolo_free_head import build_head
except:
    from yolo_free_backbone import build_backbone
    from yolo_free_neck import build_neck
    from yolo_free_fpn import build_fpn
    from yolo_free_head import build_head


__all__ = ['build_yolo_free']


model_urls = {
    'yolo_free_nano': 'https://github.com/yjh0410/FreeYOLO/releases/download/weight/yolo_free_nano_coco.pth',
    'yolo_free_tiny': 'https://github.com/yjh0410/FreeYOLO/releases/download/weight/yolo_free_tiny_coco.pth',
    'yolo_free_large': 'https://github.com/yjh0410/FreeYOLO/releases/download/weight/yolo_free_large_coco.pth',
}


yolo_free_config = {
    'yolo_free_nano': {
        # model
        'backbone': 'shufflenetv2_1.0x',
        'pretrained': True,
        'stride': [8, 16, 32],  # P3, P4, P5
        'anchor_size': None,
        # neck
        'neck': 'sppf',
        'neck_dim': 232,
        'expand_ratio': 0.5,
        'pooling_size': 5,
        'neck_act': 'lrelu',
        'neck_norm': 'BN',
        'neck_depthwise': True,
        # fpn
        'fpn': 'pafpn_elan',
        'fpn_size': 'nano',
        'fpn_dim': [116, 232, 232],
        'fpn_norm': 'BN',
        'fpn_act': 'lrelu',
        'fpn_depthwise': True,
        # head
        'head': 'decoupled_head',
        'head_dim': 64,
        'head_norm': 'BN',
        'head_act': 'lrelu',
        'num_cls_head': 2,
        'num_reg_head': 2,
        'head_depthwise': True,
    },

    'yolo_free_tiny': {
        # model
        'backbone': 'elannet_tiny',
        'pretrained': True,
        'stride': [8, 16, 32],  # P3, P4, P5
        # neck
        'neck': 'spp_block_csp',
        'neck_dim': 256,
        'expand_ratio': 0.5,
        'pooling_size': [5, 9, 13],
        'neck_act': 'lrelu',
        'neck_norm': 'BN',
        'neck_depthwise': False,
        # fpn
        'fpn': 'pafpn_elan',
        'fpn_size': 'tiny',  # 'tiny', 'large', 'huge'
        'fpn_dim': [128, 256, 256],
        'fpn_norm': 'BN',
        'fpn_act': 'lrelu',
        'fpn_depthwise': False,
        # head
        'head': 'decoupled_head',
        'head_dim': 64,
        'head_norm': 'BN',
        'head_act': 'lrelu',
        'num_cls_head': 2,
        'num_reg_head': 2,
        'head_depthwise': False,
    },

    'yolo_free_large': {
        # model
        'backbone': 'elannet_large',
        'pretrained': True,
        'stride': [8, 16, 32],  # P3, P4, P5
        # neck
        'neck': 'spp_block_csp',
        'neck_dim': 512,
        'expand_ratio': 0.5,
        'pooling_size': [5, 9, 13],
        'neck_act': 'silu',
        'neck_norm': 'BN',
        'neck_depthwise': False,
        # fpn
        'fpn': 'pafpn_elan',
        'fpn_size': 'large',  # 'tiny', 'large', 'huge'
        'fpn_dim': [512, 1024, 512],
        'fpn_norm': 'BN',
        'fpn_act': 'silu',
        'fpn_depthwise': False,
        # head
        'head': 'decoupled_head',
        'head_dim': 256,
        'head_norm': 'BN',
        'head_act': 'silu',
        'num_cls_head': 2,
        'num_reg_head': 2,
        'head_depthwise': False,
    },

}


# Anchor-free YOLO
class FreeYOLO(nn.Module):
    def __init__(self, cfg):
        super(FreeYOLO, self).__init__()
        # --------- Basic Config -----------
        self.cfg = cfg

        # --------- Network Parameters ----------
        ## backbone
        self.backbone, bk_dim = build_backbone(self.cfg['backbone'])

        ## neck
        self.neck = build_neck(cfg=self.cfg, in_dim=bk_dim[-1], out_dim=self.cfg['neck_dim'])

        ## fpn
        self.fpn = build_fpn(cfg=self.cfg, in_dims=self.cfg['fpn_dim'], out_dim=self.cfg['head_dim'])

        ## non-shared heads
        self.non_shared_heads = nn.ModuleList(
            [build_head(cfg)
             for _ in range(len(cfg['stride']))
             ])

    def forward(self, x):
        # backbone
        feats = self.backbone(x)

        # neck
        feats['layer4'] = self.neck(feats['layer4'])

        # fpn
        pyramid_feats = [feats['layer2'], feats['layer3'], feats['layer4']]
        pyramid_feats = self.fpn(pyramid_feats)

        # non-shared heads
        all_cls_feats = []
        all_reg_feats = []
        for feat, head in zip(pyramid_feats, self.non_shared_heads):
            # [B, C, H, W]
            cls_feat, reg_feat = head(feat)

            all_cls_feats.append(cls_feat)
            all_reg_feats.append(reg_feat)

        return all_cls_feats, all_reg_feats


# build FreeYOLO
def build_yolo_free(model_name='yolo_free_large', pretrained=False):
    # model config
    cfg = yolo_free_config[model_name]

    # FreeYOLO
    model = FreeYOLO(cfg)
    feat_dims = [model.cfg['head_dim']] * 3

    # Load COCO pretrained weight
    if pretrained:
        url = model_urls[model_name]

        # check
        if url is None:
            print('No 2D pretrained weight ...')
            return model, feat_dims
        else:
            print('Loading 2D backbone pretrained weight: {}'.format(model_name.upper()))

            # state dict
            checkpoint = load_state_dict_from_url(url, map_location='cpu')
            checkpoint_state_dict = checkpoint.pop('model')

            # model state dict
            model_state_dict = model.state_dict()
            # check
            for k in list(checkpoint_state_dict.keys()):
                if k in model_state_dict:
                    shape_model = tuple(model_state_dict[k].shape)
                    shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
                    if shape_model != shape_checkpoint:
                        # print(k)
                        checkpoint_state_dict.pop(k)
                else:
                    checkpoint_state_dict.pop(k)
                    # print(k)

            model.load_state_dict(checkpoint_state_dict, strict=False)

    return model, feat_dims


if __name__ == '__main__':
    model, fpn_dim = build_yolo_free(model_name='yolo_free_nano', pretrained=True)
    model.eval()

    x = torch.randn(2, 3, 64, 64)
    cls_feats, reg_feats = model(x)

    for cls_feat, reg_feat in zip(cls_feats, reg_feats):
        print(cls_feat.shape, reg_feat.shape)
models/backbone/backbone_2d/cnn_2d/yolo_free/yolo_free_backbone.py
ADDED
@@ -0,0 +1,445 @@
import torch
import torch.nn as nn
import torch.nn.functional as F


__all__ = ['build_backbone']

# ====================== ELAN-Net ==========================
# ELANNet
def get_activation(act_type=None):
    if act_type is None:
        return nn.Identity()
    elif act_type == 'relu':
        return nn.ReLU(inplace=True)
    elif act_type == 'lrelu':
        return nn.LeakyReLU(0.1, inplace=True)
    elif act_type == 'mish':
        return nn.Mish(inplace=True)
    elif act_type == 'silu':
        return nn.SiLU(inplace=True)


def get_norm(in_dim, norm_type=None):
    if norm_type is None:
        return nn.Identity()
    elif norm_type == 'BN':
        return nn.BatchNorm2d(in_dim)
    elif norm_type == 'GN':
        return nn.GroupNorm(32, in_dim)
    elif norm_type == 'IN':
        return nn.InstanceNorm2d(in_dim)


class Conv(nn.Module):
    def __init__(self,
                 c1,                  # in channels
                 c2,                  # out channels
                 k=1,                 # kernel size
                 p=0,                 # padding
                 s=1,                 # stride
                 d=1,                 # dilation
                 act_type='silu',     # activation
                 norm_type='BN',      # normalization
                 depthwise=False):
        super(Conv, self).__init__()
        convs = []
        add_bias = False if norm_type else True
        if depthwise:
            # depthwise conv
            convs.append(nn.Conv2d(c1, c1, kernel_size=k, stride=s, padding=p, dilation=d, groups=c1, bias=add_bias))
            convs.append(get_norm(c1, norm_type))
            convs.append(get_activation(act_type))

            # pointwise conv
            convs.append(nn.Conv2d(c1, c2, kernel_size=1, stride=s, padding=0, dilation=d, groups=1, bias=add_bias))
            convs.append(get_norm(c2, norm_type))
            convs.append(get_activation(act_type))

        else:
            convs.append(nn.Conv2d(c1, c2, kernel_size=k, stride=s, padding=p, dilation=d, groups=1, bias=add_bias))
            convs.append(get_norm(c2, norm_type))
            convs.append(get_activation(act_type))

        self.convs = nn.Sequential(*convs)


    def forward(self, x):
        return self.convs(x)


class ELANBlock(nn.Module):
    """
    ELAN BLock of YOLOv7's backbone
    """
    def __init__(self, in_dim, out_dim, expand_ratio=0.5, model_size='large', act_type='silu', depthwise=False):
        super(ELANBlock, self).__init__()
        inter_dim = int(in_dim * expand_ratio)
        if model_size == 'tiny':
            depth = 1
        elif model_size == 'large':
            depth = 2
        elif model_size == 'huge':
            depth = 3
        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=act_type)
        self.cv2 = Conv(in_dim, inter_dim, k=1, act_type=act_type)
        self.cv3 = nn.Sequential(*[
            Conv(inter_dim, inter_dim, k=3, p=1, act_type=act_type, depthwise=depthwise)
            for _ in range(depth)
        ])
        self.cv4 = nn.Sequential(*[
            Conv(inter_dim, inter_dim, k=3, p=1, act_type=act_type, depthwise=depthwise)
            for _ in range(depth)
        ])

        self.out = Conv(inter_dim*4, out_dim, k=1)


    def forward(self, x):
        """
        Input:
            x: [B, C, H, W]
        Output:
            out: [B, 2C, H, W]
        """
        x1 = self.cv1(x)
        x2 = self.cv2(x)
        x3 = self.cv3(x2)
        x4 = self.cv4(x3)

        # [B, C, H, W] -> [B, 2C, H, W]
        out = self.out(torch.cat([x1, x2, x3, x4], dim=1))

        return out


class DownSample(nn.Module):
    def __init__(self, in_dim, act_type='silu', norm_type='BN'):
        super().__init__()
        inter_dim = in_dim // 2
        self.mp = nn.MaxPool2d((2, 2), 2)
        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
        self.cv2 = nn.Sequential(
            Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type),
            Conv(inter_dim, inter_dim, k=3, p=1, s=2, act_type=act_type, norm_type=norm_type)
        )

    def forward(self, x):
        """
        Input:
            x: [B, C, H, W]
        Output:
            out: [B, C, H//2, W//2]
        """
        # [B, C, H, W] -> [B, C//2, H//2, W//2]
        x1 = self.cv1(self.mp(x))
        x2 = self.cv2(x)

        # [B, C, H//2, W//2]
        out = torch.cat([x1, x2], dim=1)

        return out


# ELANNet-Tiny
class ELANNet_Tiny(nn.Module):
    """
    ELAN-Net of YOLOv7-Tiny.
    """
    def __init__(self, depthwise=False):
        super(ELANNet_Tiny, self).__init__()

        # tiny backbone
        self.layer_1 = Conv(3, 32, k=3, p=1, s=2, act_type='lrelu', depthwise=depthwise)      # P1/2

        self.layer_2 = nn.Sequential(
            Conv(32, 64, k=3, p=1, s=2, act_type='lrelu', depthwise=depthwise),
            ELANBlock(in_dim=64, out_dim=64, expand_ratio=0.5,
                      model_size='tiny', act_type='lrelu', depthwise=depthwise)               # P2/4
        )
        self.layer_3 = nn.Sequential(
            nn.MaxPool2d((2, 2), 2),
            ELANBlock(in_dim=64, out_dim=128, expand_ratio=0.5,
                      model_size='tiny', act_type='lrelu', depthwise=depthwise)               # P3/8
        )
        self.layer_4 = nn.Sequential(
            nn.MaxPool2d((2, 2), 2),
            ELANBlock(in_dim=128, out_dim=256, expand_ratio=0.5,
                      model_size='tiny', act_type='lrelu', depthwise=depthwise)               # P4/16
        )
        self.layer_5 = nn.Sequential(
            nn.MaxPool2d((2, 2), 2),
            ELANBlock(in_dim=256, out_dim=512, expand_ratio=0.5,
                      model_size='tiny', act_type='lrelu', depthwise=depthwise)               # P5/32
        )


    def forward(self, x):
        c1 = self.layer_1(x)
        c2 = self.layer_2(c1)
        c3 = self.layer_3(c2)
        c4 = self.layer_4(c3)
        c5 = self.layer_5(c4)

        outputs = {
            'layer2': c3,
            'layer3': c4,
            'layer4': c5
        }
        return outputs


# ELANNet-Large
class ELANNet_Large(nn.Module):
    """
    ELAN-Net of YOLOv7.
    """
    def __init__(self, depthwise=False):
        super(ELANNet_Large, self).__init__()

        # large backbone
        self.layer_1 = nn.Sequential(
            Conv(3, 32, k=3, p=1, act_type='silu', depthwise=depthwise),
            Conv(32, 64, k=3, p=1, s=2, act_type='silu', depthwise=depthwise),
            Conv(64, 64, k=3, p=1, act_type='silu', depthwise=depthwise)                      # P1/2
        )
        self.layer_2 = nn.Sequential(
            Conv(64, 128, k=3, p=1, s=2, act_type='silu', depthwise=depthwise),
            ELANBlock(in_dim=128, out_dim=256, expand_ratio=0.5,
                      model_size='large', act_type='silu', depthwise=depthwise)               # P2/4
        )
        self.layer_3 = nn.Sequential(
            DownSample(in_dim=256, act_type='silu'),
            ELANBlock(in_dim=256, out_dim=512, expand_ratio=0.5,
                      model_size='large', act_type='silu', depthwise=depthwise)               # P3/8
        )
        self.layer_4 = nn.Sequential(
            DownSample(in_dim=512, act_type='silu'),
            ELANBlock(in_dim=512, out_dim=1024, expand_ratio=0.5,
                      model_size='large', act_type='silu', depthwise=depthwise)               # P4/16
        )
        self.layer_5 = nn.Sequential(
            DownSample(in_dim=1024, act_type='silu'),
            ELANBlock(in_dim=1024, out_dim=1024, expand_ratio=0.25,
                      model_size='large', act_type='silu', depthwise=depthwise)               # P5/32
        )


    def forward(self, x):
        c1 = self.layer_1(x)
        c2 = self.layer_2(c1)
        c3 = self.layer_3(c2)
        c4 = self.layer_4(c3)
        c5 = self.layer_5(c4)

        outputs = {
            'layer2': c3,
            'layer3': c4,
            'layer4': c5
        }
        return outputs


## build ELAN-Net
def build_elannet(model_name='elannet_large'):
    # model
    if model_name == 'elannet_large':
        backbone = ELANNet_Large()
        feat_dims = [512, 1024, 1024]
    elif model_name == 'elannet_tiny':
        backbone = ELANNet_Tiny()
        feat_dims = [128, 256, 512]

    return backbone, feat_dims


# ====================== ShuffleNet-v2 ==========================
# ShuffleNet-v2
def channel_shuffle(x, groups):
    # type: (torch.Tensor, int) -> torch.Tensor
    batchsize, num_channels, height, width = x.data.size()
    channels_per_group = num_channels // groups

    # reshape
    x = x.view(batchsize, groups,
               channels_per_group, height, width)

    x = torch.transpose(x, 1, 2).contiguous()

    # flatten
    x = x.view(batchsize, -1, height, width)

    return x


class ShuffleV2Block(nn.Module):
    def __init__(self, inp, oup, stride):
        super(ShuffleV2Block, self).__init__()

        if not (1 <= stride <= 3):
            raise ValueError('illegal stride value')
        self.stride = stride

        branch_features = oup // 2
        assert (self.stride != 1) or (inp == branch_features << 1)

        if self.stride > 1:
            self.branch1 = nn.Sequential(
                self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
                nn.BatchNorm2d(inp),
                nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(branch_features),
                nn.ReLU(inplace=True),
            )
        else:
            self.branch1 = nn.Sequential()

        self.branch2 = nn.Sequential(
            nn.Conv2d(inp if (self.stride > 1) else branch_features,
                      branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
            self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1),
            nn.BatchNorm2d(branch_features),
            nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
        )

    @staticmethod
    def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
        return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)

    def forward(self, x):
        if self.stride == 1:
            x1, x2 = x.chunk(2, dim=1)
            out = torch.cat((x1, self.branch2(x2)), dim=1)
        else:
            out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)

        out = channel_shuffle(out, 2)

        return out


class ShuffleNetV2(nn.Module):
    def __init__(self,
                 model_size='1.0x',
                 out_stages=(2, 3, 4),
                 with_last_conv=False,
                 kernal_size=3):
        super(ShuffleNetV2, self).__init__()
        print('model size is ', model_size)

        self.stage_repeats = [4, 8, 4]
        self.model_size = model_size
        self.out_stages = out_stages
        self.with_last_conv = with_last_conv
        self.kernal_size = kernal_size
        if model_size == '0.5x':
            self._stage_out_channels = [24, 48, 96, 192]
        elif model_size == '1.0x':
            self._stage_out_channels = [24, 116, 232, 464]
        elif model_size == '1.5x':
            self._stage_out_channels = [24, 176, 352, 704]
        elif model_size == '2.0x':
            self._stage_out_channels = [24, 244, 488, 976]
        else:
            raise NotImplementedError

        # building first layer
        input_channels = 3
        output_channels = self._stage_out_channels[0]
        self.conv1 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True),
        )
        input_channels = output_channels

        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        stage_names = ['stage{}'.format(i) for i in [2, 3, 4]]
        for name, repeats, output_channels in zip(
                stage_names, self.stage_repeats, self._stage_out_channels[1:]):
            seq = [ShuffleV2Block(input_channels, output_channels, 2)]
            for i in range(repeats - 1):
                seq.append(ShuffleV2Block(output_channels, output_channels, 1))
            setattr(self, name, nn.Sequential(*seq))
            input_channels = output_channels

        self._initialize_weights()


    def _initialize_weights(self):
        print('init weights...')
        for name, m in self.named_modules():
            if isinstance(m, nn.Conv2d):
                if 'first' in name:
                    nn.init.normal_(m.weight, 0, 0.01)
                else:
                    nn.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1])
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0.0001)
                nn.init.constant_(m.running_mean, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0.0001)
                nn.init.constant_(m.running_mean, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)


    def forward(self, x):
        x = self.conv1(x)
        x = self.maxpool(x)
        output = {}
        for i in range(2, 5):
            stage = getattr(self, 'stage{}'.format(i))
            x = stage(x)
            if i in self.out_stages:
                output['layer{}'.format(i)] = x

        return output


## build ShuffleNet-v2
def build_shufflenetv2(model_size='1.0x'):
    """Constructs a shufflenetv2 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    backbone = ShuffleNetV2(model_size=model_size)
    feat_dims = backbone._stage_out_channels[1:]

    return backbone, feat_dims


# build backbone
def build_backbone(model_name='elannet_large'):
    if model_name in ['elannet_nano', 'elannet_tiny', 'elannet_large', 'elannet_huge']:
        return build_elannet(model_name)

    elif model_name in ['shufflenetv2_0.5x', 'shufflenetv2_1.0x']:
        return build_shufflenetv2(model_size=model_name[-4:])


if __name__ == '__main__':
    import time
    model, feats = build_backbone(model_name='shufflenetv2_1.0x')
    x = torch.randn(1, 3, 224, 224)
    t0 = time.time()
    outputs = model(x)
    t1 = time.time()
    print('Time: ', t1 - t0)
    for k in outputs.keys():
        print(outputs[k].shape)
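As a quick illustration (not part of the upload), channel_shuffle interleaves the channels of the two ShuffleNet branches; with groups=2 the channel order [0, 1, 2, 3] becomes [0, 2, 1, 3]:

import torch
from models.backbone.backbone_2d.cnn_2d.yolo_free.yolo_free_backbone import channel_shuffle

x = torch.arange(4.0).view(1, 4, 1, 1)   # channels 0..3
y = channel_shuffle(x, groups=2)
print(y.flatten().tolist())              # [0.0, 2.0, 1.0, 3.0]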
models/backbone/backbone_2d/cnn_2d/yolo_free/yolo_free_basic.py
ADDED
@@ -0,0 +1,164 @@
import math
import torch
import torch.nn as nn
import torch.nn.functional as F


class SiLU(nn.Module):
    """export-friendly version of nn.SiLU()"""

    @staticmethod
    def forward(x):
        return x * torch.sigmoid(x)


def get_conv2d(c1, c2, k, p, s, d, g, padding_mode='ZERO', bias=False):
    if padding_mode == 'ZERO':
        conv = nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d, groups=g, bias=bias)
    elif padding_mode == 'SAME':
        conv = Conv2dSamePadding(c1, c2, k, stride=s, padding=p, dilation=d, groups=g, bias=bias)

    return conv


def get_activation(act_type=None):
    if act_type == 'relu':
        return nn.ReLU(inplace=True)
    elif act_type == 'lrelu':
        return nn.LeakyReLU(0.1, inplace=True)
    elif act_type == 'mish':
        return nn.Mish(inplace=True)
    elif act_type == 'silu':
        return nn.SiLU(inplace=True)


def get_norm(norm_type, dim):
    if norm_type == 'BN':
        return nn.BatchNorm2d(dim)
    elif norm_type == 'GN':
        return nn.GroupNorm(num_groups=32, num_channels=dim)


# Conv2d with "SAME" padding
class Conv2dSamePadding(nn.Conv2d):
    """
    A wrapper around :class:`torch.nn.Conv2d` to support "SAME" padding mode and more features.
    """

    def __init__(self, *args, **kwargs):
        """
        Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`:

        Args:
            norm (nn.Module, optional): a normalization layer
            activation (callable(Tensor) -> Tensor): a callable activation function

        It assumes that norm layer is used before activation.
        """

        # parse padding mode
        self.padding_method = kwargs.pop("padding", None)
        if self.padding_method is None:
            if len(args) >= 5:
                self.padding_method = args[4]
            else:
                self.padding_method = 0  # default padding number

        if isinstance(self.padding_method, str):
            if self.padding_method.upper() == "SAME":
                # If the padding mode is `SAME`, it will be manually padded
                super().__init__(*args, **kwargs, padding=0)
                # stride
                if isinstance(self.stride, int):
                    self.stride = [self.stride] * 2
                elif len(self.stride) == 1:
                    self.stride = [self.stride[0]] * 2
                # kernel size
                if isinstance(self.kernel_size, int):
                    self.kernel_size = [self.kernel_size] * 2
                elif len(self.kernel_size) == 1:
                    self.kernel_size = [self.kernel_size[0]] * 2
                # dilation
                if isinstance(self.dilation, int):
                    self.dilation = [self.dilation] * 2
                elif len(self.dilation) == 1:
                    self.dilation = [self.dilation[0]] * 2
            else:
                raise ValueError("Unknown padding method: {}".format(self.padding_method))
        else:
            super().__init__(*args, **kwargs, padding=self.padding_method)

    def forward(self, x):
        if isinstance(self.padding_method, str):
            if self.padding_method.upper() == "SAME":
                input_h, input_w = x.shape[-2:]
                stride_h, stride_w = self.stride
                kernel_size_h, kernel_size_w = self.kernel_size
                dilation_h, dilation_w = self.dilation

                output_h = math.ceil(input_h / stride_h)
                output_w = math.ceil(input_w / stride_w)

                padding_needed_h = max(
                    0, (output_h - 1) * stride_h + (kernel_size_h - 1) * dilation_h + 1 - input_h
                )
                padding_needed_w = max(
                    0, (output_w - 1) * stride_w + (kernel_size_w - 1) * dilation_w + 1 - input_w
                )

                left = padding_needed_w // 2
                right = padding_needed_w - left
                top = padding_needed_h // 2
                bottom = padding_needed_h - top

                x = F.pad(x, [left, right, top, bottom])
            else:
                raise ValueError("Unknown padding method: {}".format(self.padding_method))

        x = super().forward(x)

        return x


# Basic conv layer
class Conv(nn.Module):
    def __init__(self,
                 c1,                    # in channels
                 c2,                    # out channels
                 k=1,                   # kernel size
                 p=0,                   # padding
                 s=1,                   # stride
                 d=1,                   # dilation
                 act_type='',           # activation
                 norm_type='',          # normalization
                 padding_mode='ZERO',   # padding mode: "ZERO" or "SAME"
                 depthwise=False):
        super(Conv, self).__init__()
        convs = []
        add_bias = False if norm_type else True
        if depthwise:
            # depthwise conv
            convs.append(get_conv2d(c1, c1, k=k, p=p, s=s, d=d, g=c1, padding_mode=padding_mode, bias=add_bias))
            if norm_type:
                convs.append(get_norm(norm_type, c1))
            if act_type:
                convs.append(get_activation(act_type))
            # pointwise conv
            convs.append(get_conv2d(c1, c2, k=1, p=0, s=1, d=d, g=1, bias=add_bias))
            if norm_type:
                convs.append(get_norm(norm_type, c2))
            if act_type:
                convs.append(get_activation(act_type))

        else:
            convs.append(get_conv2d(c1, c2, k=k, p=p, s=s, d=d, g=1, padding_mode=padding_mode, bias=add_bias))
            if norm_type:
                convs.append(get_norm(norm_type, c2))
            if act_type:
                convs.append(get_activation(act_type))

        self.convs = nn.Sequential(*convs)


    def forward(self, x):
        return self.convs(x)
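A small illustrative check (not part of the upload) of the Conv wrapper above in depthwise-separable mode with "SAME" padding; note that, as written, both the padding argument p and padding_mode must be the string 'SAME' to reach the Conv2dSamePadding path:

import torch
from models.backbone.backbone_2d.cnn_2d.yolo_free.yolo_free_basic import Conv

conv = Conv(c1=16, c2=32, k=3, p='SAME', s=2,
            act_type='silu', norm_type='BN',
            padding_mode='SAME', depthwise=True)
x = torch.randn(1, 16, 63, 63)
print(conv(x).shape)   # torch.Size([1, 32, 32, 32]) -- ceil(63 / 2) in each spatial dim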
models/backbone/backbone_2d/cnn_2d/yolo_free/yolo_free_fpn.py
ADDED
@@ -0,0 +1,252 @@
import torch
import torch.nn as nn
import torch.nn.functional as F

try:
    from yolo_free_basic import Conv
except:
    from .yolo_free_basic import Conv


class ELANBlock(nn.Module):
    """
    ELAN BLock of YOLOv7's head
    """
    def __init__(self, in_dim, out_dim, fpn_size='large', depthwise=False, act_type='silu', norm_type='BN'):
        super(ELANBlock, self).__init__()
        if fpn_size == 'tiny' or fpn_size == 'nano':
            e1, e2 = 0.25, 1.0
            width = 2
            depth = 1
        elif fpn_size == 'large':
            e1, e2 = 0.5, 0.5
            width = 4
            depth = 1
        elif fpn_size == 'huge':
            e1, e2 = 0.5, 0.5
            width = 4
            depth = 2
        inter_dim = int(in_dim * e1)
        inter_dim2 = int(inter_dim * e2)
        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
        self.cv2 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
        self.cv3 = nn.ModuleList()
        for idx in range(width):
            if idx == 0:
                cvs = [Conv(inter_dim, inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)]
            else:
                cvs = [Conv(inter_dim2, inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)]
            # deeper
            if depth > 1:
                for _ in range(1, depth):
                    cvs.append(Conv(inter_dim2, inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise))
                self.cv3.append(nn.Sequential(*cvs))
            else:
                self.cv3.append(cvs[0])

        self.out = Conv(inter_dim*2 + inter_dim2*len(self.cv3), out_dim, k=1, act_type=act_type, norm_type=norm_type)


    def forward(self, x):
        """
        Input:
            x: [B, C_in, H, W]
        Output:
            out: [B, C_out, H, W]
        """
        x1 = self.cv1(x)
        x2 = self.cv2(x)
        inter_outs = [x1, x2]
        for m in self.cv3:
            y1 = inter_outs[-1]
            y2 = m(y1)
            inter_outs.append(y2)

        # [B, C_in, H, W] -> [B, C_out, H, W]
        out = self.out(torch.cat(inter_outs, dim=1))

        return out


class DownSample(nn.Module):
    def __init__(self, in_dim, depthwise=False, act_type='silu', norm_type='BN'):
        super().__init__()
        inter_dim = in_dim
        self.mp = nn.MaxPool2d((2, 2), 2)
        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
        self.cv2 = nn.Sequential(
            Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type),
            Conv(inter_dim, inter_dim, k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
        )

    def forward(self, x):
        """
        Input:
            x: [B, C, H, W]
        Output:
            out: [B, 2C, H//2, W//2]
        """
        # [B, C, H, W] -> [B, C//2, H//2, W//2]
        x1 = self.cv1(self.mp(x))
        x2 = self.cv2(x)

        # [B, C, H//2, W//2]
        out = torch.cat([x1, x2], dim=1)

        return out


# PaFPN-ELAN
class PaFPNELAN(nn.Module):
    def __init__(self,
                 in_dims=[512, 1024, 1024],
                 out_dim=256,
                 fpn_size='large',
                 depthwise=False,
                 norm_type='BN',
                 act_type='silu'):
        super(PaFPNELAN, self).__init__()
        self.in_dims = in_dims
        self.out_dim = out_dim
        c3, c4, c5 = in_dims
        if fpn_size == 'tiny':
            width = 0.5
        elif fpn_size == 'nano':
            assert depthwise
            width = 0.5
        elif fpn_size == 'large':
            width = 1.0
        elif fpn_size == 'huge':
            width = 1.25

        # top down
        ## P5 -> P4
        self.cv1 = Conv(c5, int(256 * width), k=1, norm_type=norm_type, act_type=act_type)
        self.cv2 = Conv(c4, int(256 * width), k=1, norm_type=norm_type, act_type=act_type)
        self.head_elan_1 = ELANBlock(in_dim=int(256 * width) + int(256 * width),
                                     out_dim=int(256 * width),
                                     fpn_size=fpn_size,
                                     depthwise=depthwise,
                                     norm_type=norm_type,
                                     act_type=act_type)

        # P4 -> P3
        self.cv3 = Conv(int(256 * width), int(128 * width), k=1, norm_type=norm_type, act_type=act_type)
        self.cv4 = Conv(c3, int(128 * width), k=1, norm_type=norm_type, act_type=act_type)
        self.head_elan_2 = ELANBlock(in_dim=int(128 * width) + int(128 * width),
                                     out_dim=int(128 * width),  # 128
                                     fpn_size=fpn_size,
                                     depthwise=depthwise,
                                     norm_type=norm_type,
                                     act_type=act_type)

        # bottom up
        # P3 -> P4
        if fpn_size == 'large' or fpn_size == 'huge':
            self.mp1 = DownSample(int(128 * width), act_type=act_type,
                                  norm_type=norm_type, depthwise=depthwise)
        elif fpn_size == 'tiny':
            self.mp1 = Conv(int(128 * width), int(256 * width), k=3, p=1, s=2,
                            act_type=act_type, norm_type=norm_type, depthwise=depthwise)
        elif fpn_size == 'nano':
            self.mp1 = nn.Sequential(
                nn.MaxPool2d((2, 2), 2),
                Conv(int(128 * width), int(256 * width), k=1, act_type=act_type, norm_type=norm_type)
            )
        self.head_elan_3 = ELANBlock(in_dim=int(256 * width) + int(256 * width),
                                     out_dim=int(256 * width),  # 256
                                     fpn_size=fpn_size,
                                     depthwise=depthwise,
                                     norm_type=norm_type,
                                     act_type=act_type)

        # P4 -> P5
        if fpn_size == 'large' or fpn_size == 'huge':
            self.mp2 = DownSample(int(256 * width), act_type=act_type,
                                  norm_type=norm_type, depthwise=depthwise)
        elif fpn_size == 'tiny':
            self.mp2 = Conv(int(256 * width), int(512 * width), k=3, p=1, s=2,
                            act_type=act_type, norm_type=norm_type, depthwise=depthwise)
        elif fpn_size == 'nano':
            self.mp2 = nn.Sequential(
                nn.MaxPool2d((2, 2), 2),
                Conv(int(256 * width), int(512 * width), k=1, act_type=act_type, norm_type=norm_type)
            )
        self.head_elan_4 = ELANBlock(in_dim=int(512 * width) + c5,
                                     out_dim=int(512 * width),  # 512
                                     fpn_size=fpn_size,
                                     depthwise=depthwise,
                                     norm_type=norm_type,
                                     act_type=act_type)

        self.head_conv_1 = Conv(int(128 * width), int(256 * width), k=3, p=1,
                                act_type=act_type, norm_type=norm_type, depthwise=depthwise)
        self.head_conv_2 = Conv(int(256 * width), int(512 * width), k=3, p=1,
                                act_type=act_type, norm_type=norm_type, depthwise=depthwise)
        self.head_conv_3 = Conv(int(512 * width), int(1024 * width), k=3, p=1,
                                act_type=act_type, norm_type=norm_type, depthwise=depthwise)
        # output proj layers
        if self.out_dim is not None:
            self.out_layers = nn.ModuleList([
                Conv(in_dim, self.out_dim, k=1,
                     norm_type=norm_type, act_type=act_type)
                for in_dim in [int(256 * width), int(512 * width), int(1024 * width)]
            ])


    def forward(self, features):
        c3, c4, c5 = features

        # Top down
        ## P5 -> P4
        c6 = self.cv1(c5)
        c7 = F.interpolate(c6, scale_factor=2.0)
        c8 = torch.cat([c7, self.cv2(c4)], dim=1)
        c9 = self.head_elan_1(c8)
        ## P4 -> P3
        c10 = self.cv3(c9)
        c11 = F.interpolate(c10, scale_factor=2.0)
        c12 = torch.cat([c11, self.cv4(c3)], dim=1)
        c13 = self.head_elan_2(c12)

        # Bottom up
        # P3 -> P4
        c14 = self.mp1(c13)
        c15 = torch.cat([c14, c9], dim=1)
        c16 = self.head_elan_3(c15)
        # P4 -> P5
        c17 = self.mp2(c16)
        c18 = torch.cat([c17, c5], dim=1)
        c19 = self.head_elan_4(c18)

        c20 = self.head_conv_1(c13)
        c21 = self.head_conv_2(c16)
        c22 = self.head_conv_3(c19)

        out_feats = [c20, c21, c22]  # [P3, P4, P5]

        # output proj layers
        if self.out_dim is not None:
            out_feats_proj = []
            for feat, layer in zip(out_feats, self.out_layers):
                out_feats_proj.append(layer(feat))
            return out_feats_proj

        return out_feats


def build_fpn(cfg, in_dims, out_dim):
    model = cfg['fpn']
    print('==============================')
    print('FPN: {}'.format(model))
    # build neck
    if model == 'pafpn_elan':
        fpn_net = PaFPNELAN(in_dims=in_dims,
                            out_dim=out_dim,
                            fpn_size=cfg['fpn_size'],
                            depthwise=cfg['fpn_depthwise'],
                            norm_type=cfg['fpn_norm'],
                            act_type=cfg['fpn_act'])

    return fpn_net
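An illustrative shape check (not part of the upload), using the 'large' settings from yolo_free_config earlier in this diff (fpn_dim=[512, 1024, 512], head_dim=256):

import torch
from models.backbone.backbone_2d.cnn_2d.yolo_free.yolo_free_fpn import PaFPNELAN

fpn = PaFPNELAN(in_dims=[512, 1024, 512], out_dim=256, fpn_size='large')
p3 = torch.randn(1, 512, 32, 32)    # stride 8
p4 = torch.randn(1, 1024, 16, 16)   # stride 16
p5 = torch.randn(1, 512, 8, 8)      # stride 32
for y in fpn([p3, p4, p5]):
    print(y.shape)   # [1, 256, 32, 32], [1, 256, 16, 16], [1, 256, 8, 8]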
models/backbone/backbone_2d/cnn_2d/yolo_free/yolo_free_head.py
ADDED
@@ -0,0 +1,51 @@
import torch
import torch.nn as nn

try:
    from yolo_free_basic import Conv
except:
    from .yolo_free_basic import Conv


class DecoupledHead(nn.Module):
    def __init__(self, cfg):
        super().__init__()

        print('==============================')
        print('Head: Decoupled Head')
        self.num_cls_head = cfg['num_cls_head']
        self.num_reg_head = cfg['num_reg_head']
        self.act_type = cfg['head_act']
        self.norm_type = cfg['head_norm']
        self.head_dim = cfg['head_dim']

        self.cls_feats = nn.Sequential(*[Conv(self.head_dim,
                                              self.head_dim,
                                              k=3, p=1, s=1,
                                              act_type=self.act_type,
                                              norm_type=self.norm_type,
                                              depthwise=cfg['head_depthwise']) for _ in range(self.num_cls_head)])
        self.reg_feats = nn.Sequential(*[Conv(self.head_dim,
                                              self.head_dim,
                                              k=3, p=1, s=1,
                                              act_type=self.act_type,
                                              norm_type=self.norm_type,
                                              depthwise=cfg['head_depthwise']) for _ in range(self.num_reg_head)])


    def forward(self, x):
        """
        in_feats: (Tensor) [B, C, H, W]
        """
        cls_feats = self.cls_feats(x)
        reg_feats = self.reg_feats(x)

        return cls_feats, reg_feats


# build detection head
def build_head(cfg):
    head = DecoupledHead(cfg)

    return head
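An illustrative call (not part of the upload), using the head settings of the 'yolo_free_large' config from earlier in this diff; the cfg dict below carries only the keys DecoupledHead reads:

import torch
from models.backbone.backbone_2d.cnn_2d.yolo_free.yolo_free_head import build_head

cfg = {'num_cls_head': 2, 'num_reg_head': 2, 'head_act': 'silu',
       'head_norm': 'BN', 'head_dim': 256, 'head_depthwise': False}
head = build_head(cfg)
cls_feat, reg_feat = head(torch.randn(1, 256, 20, 20))
print(cls_feat.shape, reg_feat.shape)   # both torch.Size([1, 256, 20, 20])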
models/backbone/backbone_2d/cnn_2d/yolo_free/yolo_free_neck.py
ADDED
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
import torch.nn as nn
|
3 |
+
|
4 |
+
try:
|
5 |
+
from yolo_free_basic import Conv
|
6 |
+
except:
|
7 |
+
from .yolo_free_basic import Conv
|
8 |
+
|
9 |
+
|
10 |
+
# Spatial Pyramid Pooling
|
11 |
+
class SPP(nn.Module):
|
12 |
+
"""
|
13 |
+
Spatial Pyramid Pooling
|
14 |
+
"""
|
15 |
+
def __init__(self, in_dim, out_dim, expand_ratio=0.5, pooling_size=[5, 9, 13], norm_type='BN', act_type='relu'):
|
16 |
+
super(SPP, self).__init__()
|
17 |
+
inter_dim = int(in_dim * expand_ratio)
|
18 |
+
self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
|
19 |
+
self.m = nn.ModuleList(
|
20 |
+
[
|
21 |
+
nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
|
22 |
+
for k in pooling_size
|
23 |
+
]
|
24 |
+
)
|
25 |
+
|
26 |
+
self.cv2 = Conv(inter_dim*(len(pooling_size) + 1), out_dim, k=1, act_type=act_type, norm_type=norm_type)
|
27 |
+
|
28 |
+
def forward(self, x):
|
29 |
+
x = self.cv1(x)
|
30 |
+
x = torch.cat([x] + [m(x) for m in self.m], dim=1)
|
31 |
+
x = self.cv2(x)
|
32 |
+
|
33 |
+
return x
|
34 |
+
|
35 |
+
|
# SPP block
class SPPBlock(nn.Module):
    """
        Spatial Pyramid Pooling Block
    """
    def __init__(self,
                 in_dim,
                 out_dim,
                 expand_ratio=0.5,
                 pooling_size=[5, 9, 13],
                 act_type='lrelu',
                 norm_type='BN',
                 depthwise=False
                 ):
        super(SPPBlock, self).__init__()
        inter_dim = int(in_dim * expand_ratio)
        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
        self.cv2 = nn.Sequential(
            SPP(inter_dim,
                inter_dim,
                expand_ratio=1.0,
                pooling_size=pooling_size,
                act_type=act_type,
                norm_type=norm_type),
        )
        self.cv3 = Conv(inter_dim * 2, out_dim, k=1, act_type=act_type, norm_type=norm_type)

    def forward(self, x):
        x1 = self.cv1(x)
        # the SPP branch expects inter_dim channels, i.e. cv1's output
        x2 = self.cv2(x1)
        y = self.cv3(torch.cat([x1, x2], dim=1))

        return y

# SPP block with CSP module
class SPPBlockCSP(nn.Module):
    """
        CSP Spatial Pyramid Pooling Block
    """
    def __init__(self,
                 in_dim,
                 out_dim,
                 expand_ratio=0.5,
                 pooling_size=[5, 9, 13],
                 act_type='lrelu',
                 norm_type='BN',
                 depthwise=False
                 ):
        super(SPPBlockCSP, self).__init__()
        inter_dim = int(in_dim * expand_ratio)
        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
        self.cv2 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
        self.m = nn.Sequential(
            Conv(inter_dim, inter_dim, k=3, p=1,
                 act_type=act_type, norm_type=norm_type,
                 depthwise=depthwise),
            SPP(inter_dim,
                inter_dim,
                expand_ratio=1.0,
                pooling_size=pooling_size,
                act_type=act_type,
                norm_type=norm_type),
            Conv(inter_dim, inter_dim, k=3, p=1,
                 act_type=act_type, norm_type=norm_type,
                 depthwise=depthwise)
        )
        self.cv3 = Conv(inter_dim * 2, out_dim, k=1, act_type=act_type, norm_type=norm_type)

    def forward(self, x):
        x1 = self.cv1(x)
        x2 = self.cv2(x)
        x3 = self.m(x2)
        y = self.cv3(torch.cat([x1, x3], dim=1))

        return y

# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
class SPPF(nn.Module):
    def __init__(self, in_dim, out_dim, k=5):  # equivalent to SPP(k=(5, 9, 13))
        super().__init__()
        inter_dim = in_dim // 2  # hidden channels
        self.cv1 = Conv(in_dim, inter_dim, k=1)
        self.cv2 = Conv(inter_dim * 4, out_dim, k=1)
        self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)

    def forward(self, x):
        x = self.cv1(x)
        y1 = self.m(x)
        y2 = self.m(y1)

        return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))

def build_neck(cfg, in_dim, out_dim):
    model = cfg['neck']
    # build neck
    if model == 'spp_block':
        neck = SPPBlock(
            in_dim, out_dim,
            expand_ratio=cfg['expand_ratio'],
            pooling_size=cfg['pooling_size'],
            act_type=cfg['neck_act'],
            norm_type=cfg['neck_norm'],
            depthwise=cfg['neck_depthwise']
        )

    elif model == 'spp_block_csp':
        neck = SPPBlockCSP(
            in_dim, out_dim,
            expand_ratio=cfg['expand_ratio'],
            pooling_size=cfg['pooling_size'],
            act_type=cfg['neck_act'],
            norm_type=cfg['neck_norm'],
            depthwise=cfg['neck_depthwise']
        )

    elif model == 'sppf':
        neck = SPPF(in_dim, out_dim, k=cfg['pooling_size'])

    return neck


if __name__ == '__main__':
    pass
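A short, hedged usage sketch of build_neck above: the config keys mirror the ones the function reads, but the values are illustrative, and the first part only runs inside this repo where the Conv block from yolo_free_basic resolves. The second part checks, with plain PyTorch, the comment on SPPF that stacked stride-1 5x5 max-pools reproduce the 9x9 and 13x13 pools of classic SPP.

import torch
import torch.nn as nn

# 1) Build a CSP-SPP neck; key names are taken from build_neck, values are illustrative.
cfg = {
    'neck': 'spp_block_csp',
    'expand_ratio': 0.5,
    'pooling_size': [5, 9, 13],
    'neck_act': 'lrelu',
    'neck_norm': 'BN',
    'neck_depthwise': False,
}
neck = build_neck(cfg, in_dim=512, out_dim=512)
y = neck(torch.randn(1, 512, 20, 20))
print(y.shape)  # spatial size preserved: torch.Size([1, 512, 20, 20])

# 2) Why SPPF(k=5) matches SPP(pooling_size=[5, 9, 13]):
#    two stacked 5x5 pools cover a 9x9 window, three cover 13x13.
m5, m9, m13 = (nn.MaxPool2d(k, stride=1, padding=k // 2) for k in (5, 9, 13))
x = torch.randn(1, 8, 32, 32)
assert torch.equal(m5(m5(x)), m9(x))
assert torch.equal(m5(m5(m5(x))), m13(x))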
models/backbone/backbone_3d/__init__.py
ADDED
File without changes
models/backbone/backbone_3d/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (151 Bytes).
models/backbone/backbone_3d/__pycache__/__init__.cpython-37.pyc
ADDED
Binary file (145 Bytes).
models/backbone/backbone_3d/__pycache__/backbone_3d.cpython-310.pyc
ADDED
Binary file (2.48 kB).
models/backbone/backbone_3d/__pycache__/backbone_3d.cpython-37.pyc
ADDED
Binary file (2.53 kB).
models/backbone/backbone_3d/backbone_3d.py
ADDED
@@ -0,0 +1,68 @@
import torch.nn as nn
import torch.nn.functional as F

from .cnn_3d import build_3d_cnn


class Conv(nn.Module):
    def __init__(self, in_dim, out_dim, k=3, p=1, s=1, depthwise=False):
        super().__init__()
        if depthwise:
            self.convs = nn.Sequential(
                # depthwise 3x3: keeps in_dim channels, so normalize over in_dim here
                nn.Conv2d(in_dim, in_dim, kernel_size=k, padding=p, stride=s, groups=in_dim, bias=False),
                nn.BatchNorm2d(in_dim),
                nn.ReLU(inplace=True),
                # pointwise projection to out_dim
                nn.Conv2d(in_dim, out_dim, kernel_size=1, groups=in_dim, bias=False),
                nn.BatchNorm2d(out_dim),
                nn.ReLU(inplace=True),
            )
        else:
            self.convs = nn.Sequential(
                nn.Conv2d(in_dim, out_dim, kernel_size=k, padding=p, stride=s, bias=False),
                nn.BatchNorm2d(out_dim),
                nn.ReLU(inplace=True)
            )

    def forward(self, x):
        return self.convs(x)


class ConvBlocks(nn.Module):
    def __init__(self, in_dim, out_dim, nblocks=1, depthwise=False):
        super().__init__()
        assert in_dim == out_dim

        conv_block = []
        for _ in range(nblocks):
            conv_block.append(
                Conv(in_dim, out_dim, k=3, p=1, s=1, depthwise=depthwise)
            )
        self.conv_block = nn.Sequential(*conv_block)

    def forward(self, x):
        return self.conv_block(x)


class Backbone3D(nn.Module):
    def __init__(self, cfg, pretrained=False):
        super().__init__()
        self.cfg = cfg

        # 3D CNN
        self.backbone, self.feat_dim = build_3d_cnn(cfg, pretrained)

    def forward(self, x):
        """
        Input:
            x: (Tensor) -> [B, C, T, H, W]
        Output:
            y: (List) -> [
                (Tensor) -> [B, C1, H1, W1],
                (Tensor) -> [B, C2, H2, W2],
                (Tensor) -> [B, C3, H3, W3]
            ]
        """
        feat = self.backbone(x)

        return feat
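A hedged usage sketch of the Backbone3D wrapper above. The model-name string and the clip length are assumptions for illustration; the names actually accepted depend on the builders under cnn_3d/.

import torch

# Hypothetical config: 'backbone_3d' is the key read by build_3d_cnn; the value
# 'resnet18' is an assumed model name and may differ from the repo's configs.
cfg = {'backbone_3d': 'resnet18'}

backbone = Backbone3D(cfg, pretrained=False)
clip = torch.randn(2, 3, 16, 224, 224)   # [B, C, T, H, W]: a 16-frame RGB clip
feat = backbone(clip)                    # channel width is exposed as backbone.feat_dim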
models/backbone/backbone_3d/cnn_3d/__init__.py
ADDED
@@ -0,0 +1,30 @@
from .resnet import build_resnet_3d
from .resnext import build_resnext_3d
from .shufflnetv2 import build_shufflenetv2_3d


def build_3d_cnn(cfg, pretrained=False):
    print('==============================')
    print('3D Backbone: {}'.format(cfg['backbone_3d'].upper()))
    print('--pretrained: {}'.format(pretrained))

    if 'resnet' in cfg['backbone_3d']:
        model, feat_dims = build_resnet_3d(
            model_name=cfg['backbone_3d'],
            pretrained=pretrained
        )
    elif 'resnext' in cfg['backbone_3d']:
        model, feat_dims = build_resnext_3d(
            model_name=cfg['backbone_3d'],
            pretrained=pretrained
        )
    elif 'shufflenetv2' in cfg['backbone_3d']:
        model, feat_dims = build_shufflenetv2_3d(
            model_size=cfg['model_size'],
            pretrained=pretrained
        )
    else:
        print('Unknown Backbone ...')
        exit()

    return model, feat_dims
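The dispatch above keys off substrings of cfg['backbone_3d'] ('resnet' is tested before 'resnext', which works because 'resnext' does not contain the substring 'resnet'). A hedged sketch of configs that reach each branch; only the key names are taken from the code, the model-name strings and 'model_size' value are assumptions:

# Hypothetical configs; the concrete strings accepted by the individual builders
# are defined elsewhere in the repo.
cfg_resnet = {'backbone_3d': 'resnet18'}                             # -> build_resnet_3d
cfg_resnext = {'backbone_3d': 'resnext101'}                          # -> build_resnext_3d
cfg_shuffle = {'backbone_3d': 'shufflenetv2', 'model_size': '1.0x'}  # -> build_shufflenetv2_3d

model, feat_dims = build_3d_cnn(cfg_resnet, pretrained=False)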
models/backbone/backbone_3d/cnn_3d/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (891 Bytes).
models/backbone/backbone_3d/cnn_3d/__pycache__/__init__.cpython-37.pyc
ADDED
Binary file (851 Bytes).
models/backbone/backbone_3d/cnn_3d/__pycache__/resnet.cpython-310.pyc
ADDED
Binary file (7.2 kB).
models/backbone/backbone_3d/cnn_3d/__pycache__/resnet.cpython-37.pyc
ADDED
Binary file (7.38 kB).
models/backbone/backbone_3d/cnn_3d/__pycache__/resnext.cpython-310.pyc
ADDED
Binary file (6.46 kB).
models/backbone/backbone_3d/cnn_3d/__pycache__/resnext.cpython-37.pyc
ADDED
Binary file (6.52 kB).
models/backbone/backbone_3d/cnn_3d/__pycache__/shufflnetv2.cpython-310.pyc
ADDED
Binary file (5.96 kB).