import torch
import torch.nn as nn
from torch.nn import Module, Sequential, Conv2d, BatchNorm2d, PReLU, Dropout, Flatten, Linear, BatchNorm1d, MaxPool2d, AdaptiveAvgPool2d, ReLU, Sigmoid
from collections import namedtuple
from pytorch_msssim import ms_ssim
import lpips
import clip
from torchvision import transforms

class LPIPS(nn.Module):
    def __init__(self, net='alex', device='cuda'):
        super(LPIPS, self).__init__()
        # Perceptual similarity backbone; `net` selects the LPIPS feature network.
        self.lpips = lpips.LPIPS(net=net).to(device)

    def forward(self, x, y):
        # Return a similarity score (1 - LPIPS distance) for images in [-1, 1].
        return 1 - self.lpips(x, y).squeeze()

class MS_SSIM(nn.Module):
    def __init__(self, avg=False):
        super(MS_SSIM, self).__init__()
        self.ssim = ms_ssim
        self.avg = avg

    def forward(self, x, y):
        # normalize images to [0, 1]
        x = (x + 1) / 2
        y = (y + 1) / 2
        return self.ssim(x.unsqueeze(0), y.unsqueeze(0), data_range=1, size_average=self.avg)

class IdScore(nn.Module):
    """Identity similarity between two faces, computed with a pretrained ArcFace (IR-SE50) backbone."""
    def __init__(self, device='cuda'):
        super(IdScore, self).__init__()
        self.facenet = Backbone(input_size=112, num_layers=50, drop_ratio=0.6).to(device)
        self.facenet.load_state_dict(torch.load('./pretrained_models/model_ir_se50.pth', map_location=torch.device(device)))
        self.face_pool = torch.nn.AdaptiveAvgPool2d((112, 112))
        self.facenet.eval()
        self.cosine_sim = nn.CosineSimilarity(dim=1)

    def extract_feats(self, x):
        # Resize to the 112x112 input expected by the ArcFace backbone and embed.
        x = self.face_pool(x)
        x_feats = self.facenet(x)
        return x_feats

    def forward(self, y, x):
        x = x.unsqueeze(0)
        y = y.unsqueeze(0)
        x_feats = self.extract_feats(x)
        y_feats = self.extract_feats(y).detach()
        cosine_sim = self.cosine_sim(y_feats, x_feats)
        return cosine_sim

class ClipHair(nn.Module):
    """Cosine similarity between CLIP (ViT-B/32) image embeddings of two images."""
    def __init__(self, device='cuda'):
        super(ClipHair, self).__init__()
        self.model, self.preprocessing = clip.load("ViT-B/32", device=device)
        self.cosine_sim = nn.CosineSimilarity(dim=1)
        self.device = device

    def extract_feats(self, x):
        # Convert the tensor back to PIL, apply CLIP's preprocessing, and embed.
        x = transforms.ToPILImage()(x.squeeze())
        x = self.preprocessing(x).unsqueeze(0).to(self.device)
        x = self.model.encode_image(x)
        return x

    def forward(self, y, x):
        x = x.unsqueeze(0)
        y = y.unsqueeze(0)
        x_feats = self.extract_feats(x)
        y_feats = self.extract_feats(y).detach()
        cosine_sim = self.cosine_sim(x_feats, y_feats)
        return cosine_sim

class bottleneck_IR_SE(Module):
    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR_SE, self).__init__()
        if in_channel == depth:
            self.shortcut_layer = MaxPool2d(1, stride)
        else:
            self.shortcut_layer = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth)
            )
        self.res_layer = Sequential(
            BatchNorm2d(in_channel),
            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
            PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
            BatchNorm2d(depth),
            SEModule(depth, 16)
        )

    def forward(self, x):
        shortcut = self.shortcut_layer(x)
        res = self.res_layer(x)
        return res + shortcut

class Backbone(Module):
    def __init__(self, input_size, num_layers, drop_ratio=0.4, affine=True):
        super(Backbone, self).__init__()
        assert input_size in [112, 224], "input_size should be 112 or 224"
        assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
        blocks = get_blocks(num_layers)
        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64),
                                      PReLU(64))
        if input_size == 112:
            self.output_layer = Sequential(BatchNorm2d(512),
                                           Dropout(drop_ratio),
                                           Flatten(),
                                           Linear(512 * 7 * 7, 512),
                                           BatchNorm1d(512, affine=affine))
        else:
            self.output_layer = Sequential(BatchNorm2d(512),
                                           Dropout(drop_ratio),
                                           Flatten(),
                                           Linear(512 * 14 * 14, 512),
                                           BatchNorm1d(512, affine=affine))
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(bottleneck_IR_SE(bottleneck.in_channel,
                                                bottleneck.depth,
                                                bottleneck.stride))
        self.body = Sequential(*modules)

    def forward(self, x):
        x = self.input_layer(x)
        x = self.body(x)
        x = self.output_layer(x)
        return l2_norm(x)

def get_blocks(num_layers):
    if num_layers == 50:
        blocks = [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=4),
            get_block(in_channel=128, depth=256, num_units=14),
            get_block(in_channel=256, depth=512, num_units=3)
        ]
    elif num_layers == 100:
        blocks = [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=13),
            get_block(in_channel=128, depth=256, num_units=30),
            get_block(in_channel=256, depth=512, num_units=3)
        ]
    elif num_layers == 152:
        blocks = [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=8),
            get_block(in_channel=128, depth=256, num_units=36),
            get_block(in_channel=256, depth=512, num_units=3)
        ]
    else:
        raise ValueError("Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers))
    return blocks

class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
    """ A named tuple describing a ResNet block. """


def get_block(in_channel, depth, num_units, stride=2):
    return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for _ in range(num_units - 1)]


def l2_norm(input, axis=1):
    norm = torch.norm(input, 2, axis, True)
    output = torch.div(input, norm)
    return output

class SEModule(Module):
    def __init__(self, channels, reduction):
        super(SEModule, self).__init__()
        self.avg_pool = AdaptiveAvgPool2d(1)
        self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
        self.relu = ReLU(inplace=True)
        self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
        self.sigmoid = Sigmoid()

    def forward(self, x):
        # Squeeze-and-Excitation: global-pool, compute per-channel weights, rescale the input.
        module_input = x
        x = self.avg_pool(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.sigmoid(x)
        return module_input * x
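

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): it shows how the
# metric wrappers above might be called on a pair of aligned face images given
# as 3xHxW tensors scaled to [-1, 1]. The random tensors below are hypothetical
# stand-ins for real images; IdScore is left commented out because it also
# needs the ./pretrained_models/model_ir_se50.pth checkpoint.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Dummy "source" and "edited" images in [-1, 1], only for shape illustration.
    source = torch.rand(3, 256, 256, device=device) * 2 - 1
    edited = torch.rand(3, 256, 256, device=device) * 2 - 1

    lpips_metric = LPIPS(device=device)
    ssim_metric = MS_SSIM(avg=True)
    clip_metric = ClipHair(device=device)

    with torch.no_grad():
        # LPIPS wrapper expects batched NCHW input; the others unsqueeze internally.
        print('LPIPS similarity:', lpips_metric(edited.unsqueeze(0), source.unsqueeze(0)).item())
        print('MS-SSIM:', ssim_metric(edited, source).item())
        print('CLIP similarity:', clip_metric(edited, source).item())
        # id_metric = IdScore(device=device)
        # print('ID similarity:', id_metric(edited, source).item())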