from torch import nn


class SELayer(nn.Module):
    """Squeeze-and-Excitation layer: recalibrates channels with global pooling and a bottleneck MLP."""

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        # Squeeze: global average pooling to one value per channel.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Excitation: bottleneck MLP (channel -> channel // reduction -> channel) with sigmoid gating.
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        b, c, _, _ = x.size()
        # Per-channel descriptors of shape (b, c).
        y = self.avg_pool(x).view(b, c)
        # Channel attention weights reshaped to (b, c, 1, 1) for broadcasting over H and W.
        y = self.fc(y).view(b, c, 1, 1)
        return x * y.expand_as(x)
class SEBlock(nn.Module):
    """Squeeze-and-Excitation block using either 1x1 convolutions or linear layers for the bottleneck."""

    def __init__(
        self, channels, reduction=16, use_conv=True, mid_activation=nn.ReLU(inplace=True), out_activation=nn.Sigmoid()
    ):
        super(SEBlock, self).__init__()
        self.use_conv = use_conv
        mid_channels = channels // reduction
        # Squeeze: global average pooling to a 1x1 spatial map per channel.
        self.pool = nn.AdaptiveAvgPool2d(output_size=1)
        if use_conv:
            self.conv1 = nn.Conv2d(channels, mid_channels, kernel_size=1, bias=True)
        else:
            self.fc1 = nn.Linear(channels, mid_channels)
        self.activ = mid_activation
        if use_conv:
            self.conv2 = nn.Conv2d(mid_channels, channels, kernel_size=1, bias=True)
        else:
            self.fc2 = nn.Linear(mid_channels, channels)
        self.sigmoid = out_activation

    def forward(self, x):
        w = self.pool(x)
        if not self.use_conv:
            # Flatten to (batch, channels) so the linear layers can consume it.
            w = w.view(x.size(0), -1)
        w = self.conv1(w) if self.use_conv else self.fc1(w)
        w = self.activ(w)
        w = self.conv2(w) if self.use_conv else self.fc2(w)
        w = self.sigmoid(w)
        if not self.use_conv:
            # Restore the (batch, channels, 1, 1) shape for broadcasting.
            w = w.unsqueeze(2).unsqueeze(3)
        x = x * w
        return x
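

# A minimal smoke test, not part of the original module: it assumes standard
# 4-D inputs of shape (batch, channels, height, width) and only checks that
# both blocks preserve the input shape under each bottleneck variant.
if __name__ == "__main__":
    import torch

    x = torch.randn(2, 64, 32, 32)
    assert SELayer(64)(x).shape == x.shape
    assert SEBlock(64, use_conv=True)(x).shape == x.shape
    assert SEBlock(64, use_conv=False)(x).shape == x.shape
    print("SELayer / SEBlock shape checks passed")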