glenn-jocher committed
Commit b5659d1 • 1 Parent(s): 86784cf
module updates
Browse files
- models/common.py +10 -7
- models/experimental.py +34 -0
models/common.py
CHANGED
@@ -1,9 +1,13 @@
 # This file contains modules common to various models
 
-
 from utils.utils import *
 
 
+def autopad(k):
+    # Pad to 'same'
+    return k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad
+
+
 def DWConv(c1, c2, k=1, s=1, act=True):
     # Depthwise convolution
     return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
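The new autopad helper centralizes the 'same'-padding rule that Conv previously computed inline. A minimal sketch of the values it yields (a standalone copy of the one-liner above, checked by hand rather than taken from the repo's tests):

    def autopad(k):  # standalone copy of the helper above
        return k // 2 if isinstance(k, int) else [x // 2 for x in k]

    assert autopad(1) == 0            # 1x1 kernel: no padding
    assert autopad(3) == 1            # 3x3 kernel: pad 1 keeps H, W at stride 1
    assert autopad((1, 3)) == [0, 1]  # rectangular kernel: per-dimension padding

For the odd kernel sizes used throughout these models, k // 2 is exactly the padding that preserves spatial resolution at stride 1.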
@@ -11,10 +15,9 @@ def DWConv(c1, c2, k=1, s=1, act=True):
 
 class Conv(nn.Module):
     # Standard convolution
-    def __init__(self, c1, c2, k=1, s=1, g=1, act=True):  # ch_in, ch_out, kernel, stride, groups
+    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
         super(Conv, self).__init__()
-        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # padding
-        self.conv = nn.Conv2d(c1, c2, k, s, p, groups=g, bias=False)
+        self.conv = nn.Conv2d(c1, c2, k, s, p or autopad(k), groups=g, bias=False)
         self.bn = nn.BatchNorm2d(c2)
         self.act = nn.LeakyReLU(0.1, inplace=True) if act else nn.Identity()
 
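With p defaulting to None, "p or autopad(k)" falls back to 'same' padding unless a value is passed explicitly (note that an explicit p=0 is also falsy and would likewise fall through to autopad(k)). A quick shape check, assuming models.common with these changes is importable:

    import torch
    from models.common import Conv  # assumes the updated module is on the path

    x = torch.zeros(1, 16, 64, 64)
    print(Conv(16, 32, k=3)(x).shape)       # torch.Size([1, 32, 64, 64]); p=None -> autopad(3) = 1
    print(Conv(16, 32, k=3, s=2)(x).shape)  # torch.Size([1, 32, 32, 32]); stride 2 halves H, W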
@@ -46,7 +49,7 @@ class BottleneckCSP(nn.Module):
         self.cv1 = Conv(c1, c_, 1, 1)
         self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
         self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
-        self.cv4 = Conv(c2, c2, 1, 1)
+        self.cv4 = Conv(2 * c_, c2, 1, 1)
         self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
         self.act = nn.LeakyReLU(0.1, inplace=True)
         self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
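cv4 consumes cat(cv2, cv3), which has 2 * c_ channels, so declaring its input width as 2 * c_ matches what it actually receives; the two widths coincide only at the default expansion e=0.5. A worked check of the channel arithmetic with a hypothetical c2 = 64:

    c2 = 64
    for e in (0.5, 0.25):
        c_ = int(c2 * e)                # hidden channels
        print(e, 2 * c_, 2 * c_ == c2)  # 0.5: 64 True; 0.25: 32 False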
@@ -79,9 +82,9 @@ class Flatten(nn.Module):
 
 class Focus(nn.Module):
     # Focus wh information into c-space
-    def __init__(self, c1, c2, k=1):
+    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
         super(Focus, self).__init__()
-        self.conv = Conv(c1 * 4, c2, k, 1)
+        self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
 
     def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
         return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
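Focus rearranges each 2x2 spatial block into channels before convolving, so the half-resolution output loses no pixels. A sketch of the slicing on its own, independent of the Conv that follows:

    import torch

    x = torch.arange(16.).reshape(1, 1, 4, 4)  # toy (b, c, h, w) input
    y = torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2],
                   x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)
    print(y.shape)  # torch.Size([1, 4, 2, 2]): 4x channels, half h and w, all 16 values kept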
models/experimental.py
CHANGED
@@ -1,6 +1,40 @@
+# This file contains experimental modules
+
 from models.common import *
 
 
+class CrossConv(nn.Module):
+    # Cross Convolution
+    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion
+        super(CrossConv, self).__init__()
+        c_ = int(c2 * e)  # hidden channels
+        self.cv1 = Conv(c1, c_, (1, 3), 1)
+        self.cv2 = Conv(c_, c2, (3, 1), 1, g=g)
+        self.add = shortcut and c1 == c2
+
+    def forward(self, x):
+        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
+
+
+class C3(nn.Module):
+    # Cross Convolution CSP
+    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
+        super(C3, self).__init__()
+        c_ = int(c2 * e)  # hidden channels
+        self.cv1 = Conv(c1, c_, 1, 1)
+        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
+        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
+        self.cv4 = Conv(2 * c_, c2, 1, 1)
+        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
+        self.act = nn.LeakyReLU(0.1, inplace=True)
+        self.m = nn.Sequential(*[CrossConv(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
+
+    def forward(self, x):
+        y1 = self.cv3(self.m(self.cv1(x)))
+        y2 = self.cv2(x)
+        return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
+
+
 class Sum(nn.Module):
     # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
     def __init__(self, n, weight=False):  # n: number of inputs
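CrossConv factorizes a 3x3 receptive field into a 1x3 convolution followed by a 3x1 convolution through a hidden width c_, adding a residual connection when c1 == c2. A shape sketch, assuming the module builds as written above:

    import torch
    from models.experimental import CrossConv

    m = CrossConv(64, 64)           # c1 == c2 and shortcut=True -> residual add
    x = torch.zeros(1, 64, 32, 32)
    print(m(x).shape)               # torch.Size([1, 64, 32, 32]); autopad keeps H, W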