# SMPLer-X — common/nets/layer.py
import torch.nn as nn


def make_linear_layers(feat_dims, relu_final=True, use_bn=False):
    """Build an MLP from a list of feature dims, e.g. [in, hidden, ..., out]."""
    layers = []
    for i in range(len(feat_dims) - 1):
        layers.append(nn.Linear(feat_dims[i], feat_dims[i + 1]))

        # Do not apply (BN +) ReLU to the final estimation layer unless relu_final is set
        if i < len(feat_dims) - 2 or (i == len(feat_dims) - 2 and relu_final):
            if use_bn:
                layers.append(nn.BatchNorm1d(feat_dims[i + 1]))
            layers.append(nn.ReLU(inplace=True))

    return nn.Sequential(*layers)
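
# Illustrative usage (a sketch; the dimensions are placeholders, not values from
# the SMPLer-X configs):
#   make_linear_layers([512, 256, 3], relu_final=False)
#   -> Sequential(Linear(512, 256), ReLU, Linear(256, 3))  # no activation on the output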


def make_conv_layers(feat_dims, kernel=3, stride=1, padding=1, bnrelu_final=True):
    """Build a stack of Conv2d layers, each followed by BatchNorm2d + ReLU."""
    layers = []
    for i in range(len(feat_dims) - 1):
        layers.append(
            nn.Conv2d(
                in_channels=feat_dims[i],
                out_channels=feat_dims[i + 1],
                kernel_size=kernel,
                stride=stride,
                padding=padding
            ))

        # Do not apply BN and ReLU to the final estimation layer unless bnrelu_final is set
        if i < len(feat_dims) - 2 or (i == len(feat_dims) - 2 and bnrelu_final):
            layers.append(nn.BatchNorm2d(feat_dims[i + 1]))
            layers.append(nn.ReLU(inplace=True))

    return nn.Sequential(*layers)
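
# Illustrative usage (a sketch with placeholder channel counts): kernel=1 and
# padding=0 give a 1x1-conv prediction head, e.g.
#   make_conv_layers([256, 256, 17], kernel=1, padding=0, bnrelu_final=False)
#   -> Conv2d(256, 256, 1) + BN + ReLU, then a bare Conv2d(256, 17, 1) output layer.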


def make_deconv_layers(feat_dims, bnrelu_final=True):
    """Build a stack of 4x4, stride-2 ConvTranspose2d layers (each doubles H and W)."""
    layers = []
    for i in range(len(feat_dims) - 1):
        layers.append(
            nn.ConvTranspose2d(
                in_channels=feat_dims[i],
                out_channels=feat_dims[i + 1],
                kernel_size=4,
                stride=2,
                padding=1,
                output_padding=0,
                bias=False))

        # Do not apply BN and ReLU to the final estimation layer unless bnrelu_final is set
        if i < len(feat_dims) - 2 or (i == len(feat_dims) - 2 and bnrelu_final):
            layers.append(nn.BatchNorm2d(feat_dims[i + 1]))
            layers.append(nn.ReLU(inplace=True))

    return nn.Sequential(*layers)
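
# Minimal smoke test (illustrative only; the shapes and channel counts below are
# assumptions for demonstration, not values taken from SMPLer-X).
if __name__ == "__main__":
    import torch

    x = torch.randn(2, 512)
    mlp = make_linear_layers([512, 256, 3], relu_final=False)
    assert mlp(x).shape == (2, 3)

    feat = torch.randn(2, 64, 8, 8)
    conv = make_conv_layers([64, 32, 16])          # kernel=3, padding=1 keeps H and W
    assert conv(feat).shape == (2, 16, 8, 8)

    deconv = make_deconv_layers([64, 32, 16])      # each deconv doubles H and W
    assert deconv(feat).shape == (2, 16, 32, 32)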