import torch
import torch.nn as nn
from torch.nn import Module, Sequential, Conv2d, BatchNorm2d, PReLU, Dropout, Flatten, Linear, BatchNorm1d, MaxPool2d, AdaptiveAvgPool2d, ReLU, Sigmoid
from collections import namedtuple
from pytorch_msssim import ms_ssim
import lpips
import clip
from torchvision import transforms

class LPIPS(nn.Module):
    """Perceptual similarity. Returns 1 - LPIPS distance, so higher means more similar."""

    def __init__(self, net='alex', device='cuda'):
        super(LPIPS, self).__init__()
        self.lpips = lpips.LPIPS(net=net).to(device)  # use the requested backbone instead of hardcoding 'alex'

    def forward(self, x, y):
        return 1 - self.lpips(x, y).squeeze()
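
# Minimal usage sketch (illustrative, not part of the original module): LPIPS here is
# a similarity rather than a distance, so identical images score 1.0. The tensors are
# made-up stand-ins for real NCHW images in [-1, 1].
def _demo_lpips():  # hypothetical helper
    metric = LPIPS(net='alex', device='cpu')
    x = torch.rand(1, 3, 256, 256) * 2 - 1
    print(metric(x, x.clone()).item())  # ~1.0 for identical inputs
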

class MS_SSIM(nn.Module):
    """Multi-scale SSIM between two CHW images given in [-1, 1]."""

    def __init__(self, avg=False):
        super(MS_SSIM, self).__init__()
        self.ssim = ms_ssim
        self.avg = avg

    def forward(self, x, y):
        # rescale images from [-1, 1] to [0, 1], the range declared via data_range=1
        x = (x + 1) / 2
        y = (y + 1) / 2
        return self.ssim(x.unsqueeze(0), y.unsqueeze(0), data_range=1, size_average=self.avg)
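
# Minimal usage sketch (illustrative): the wrapper batches single CHW images itself.
# Note that ms_ssim with its default 5 scales needs inputs larger than 160x160, so
# made-up 256x256 images are used here.
def _demo_ms_ssim():  # hypothetical helper
    metric = MS_SSIM(avg=True)
    x = torch.rand(3, 256, 256) * 2 - 1
    print(metric(x, x.clone()).item())  # 1.0 for identical images
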

class IdScore(nn.Module):
    """Identity similarity: cosine similarity between ArcFace (IR-SE-50) embeddings."""

    def __init__(self, device='cuda'):
        super(IdScore, self).__init__()
        self.facenet = Backbone(input_size=112, num_layers=50, drop_ratio=0.6).to(device)
        self.facenet.load_state_dict(torch.load('./pretrained_models/model_ir_se50.pth', map_location=torch.device(device)))
        self.face_pool = torch.nn.AdaptiveAvgPool2d((112, 112))  # ArcFace expects 112x112 inputs
        self.facenet.eval()
        self.cosine_sim = nn.CosineSimilarity(dim=1)

    def extract_feats(self, x):
        x = self.face_pool(x)
        x_feats = self.facenet(x)
        return x_feats

    def forward(self, y, x):
        x = x.unsqueeze(0)
        y = y.unsqueeze(0)
        x_feats = self.extract_feats(x)
        y_feats = self.extract_feats(y).detach()  # no gradient through the reference embedding
        cosine_sim = self.cosine_sim(y_feats, x_feats)
        return cosine_sim
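
# Minimal usage sketch (illustrative; requires the pretrained weights at
# ./pretrained_models/model_ir_se50.pth). Inputs are single CHW aligned face crops;
# a score near 1.0 means the same identity.
def _demo_id_score():  # hypothetical helper
    metric = IdScore(device='cpu')
    y = torch.rand(3, 256, 256) * 2 - 1  # made-up "reference" face
    x = torch.rand(3, 256, 256) * 2 - 1  # made-up "generated" face
    print(metric(y, x).item())
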

class ClipHair(nn.Module):
    """Hairstyle similarity: cosine similarity between CLIP ViT-B/32 image embeddings."""

    def __init__(self, device='cuda'):
        super(ClipHair, self).__init__()
        self.model, self.preprocessing = clip.load("ViT-B/32", device=device)
        self.cosine_sim = nn.CosineSimilarity(dim=1)
        self.device = device

    def extract_feats(self, x):
        # round-trip through PIL so CLIP's own resize/crop/normalize pipeline applies
        x = transforms.ToPILImage()(x.squeeze())
        x = self.preprocessing(x).unsqueeze(0).to(self.device)
        x = self.model.encode_image(x)
        return x

    def forward(self, y, x):
        x = x.unsqueeze(0)
        y = y.unsqueeze(0)
        x_feats = self.extract_feats(x)
        y_feats = self.extract_feats(y).detach()  # no gradient through the reference embedding
        cosine_sim = self.cosine_sim(x_feats, y_feats)
        return cosine_sim
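
# Minimal usage sketch (illustrative; clip.load fetches the ViT-B/32 weights on first
# use): global CLIP embeddings serve as a proxy for hairstyle similarity. ToPILImage
# expects float tensors in [0, 1], so the made-up inputs stay in that range.
def _demo_clip_hair():  # hypothetical helper
    metric = ClipHair(device='cpu')
    y = torch.rand(3, 256, 256)
    x = torch.rand(3, 256, 256)
    print(metric(y, x).item())
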

class bottleneck_IR_SE(Module):
    """Improved-residual bottleneck with a squeeze-and-excitation block (IR-SE)."""

    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR_SE, self).__init__()
        if in_channel == depth:
            # identity shortcut; MaxPool2d(1, stride) only applies the spatial stride
            self.shortcut_layer = MaxPool2d(1, stride)
        else:
            # 1x1 projection shortcut to match channels and stride
            self.shortcut_layer = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth)
            )
        self.res_layer = Sequential(
            BatchNorm2d(in_channel),
            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
            PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
            BatchNorm2d(depth),
            SEModule(depth, 16)
        )

    def forward(self, x):
        shortcut = self.shortcut_layer(x)
        res = self.res_layer(x)
        return res + shortcut

class Backbone(Module):
    """IR-SE ResNet backbone mapping an aligned face to an L2-normalized 512-dim embedding."""

    def __init__(self, input_size, num_layers, drop_ratio=0.4, affine=True):
        super(Backbone, self).__init__()
        assert input_size in [112, 224], "input_size should be 112 or 224"
        assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
        blocks = get_blocks(num_layers)
        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64),
                                      PReLU(64))
        # four stride-2 stages shrink 112 -> 7 (or 224 -> 14) feature maps
        if input_size == 112:
            self.output_layer = Sequential(BatchNorm2d(512),
                                           Dropout(drop_ratio),
                                           Flatten(),
                                           Linear(512 * 7 * 7, 512),
                                           BatchNorm1d(512, affine=affine))
        else:
            self.output_layer = Sequential(BatchNorm2d(512),
                                           Dropout(drop_ratio),
                                           Flatten(),
                                           Linear(512 * 14 * 14, 512),
                                           BatchNorm1d(512, affine=affine))
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(bottleneck_IR_SE(bottleneck.in_channel,
                                                bottleneck.depth,
                                                bottleneck.stride))
        self.body = Sequential(*modules)

    def forward(self, x):
        x = self.input_layer(x)
        x = self.body(x)
        x = self.output_layer(x)
        return l2_norm(x)
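
# Minimal shape check (illustrative): IR-SE-50 maps a 112x112 face to a unit-norm
# 512-dim embedding; a 224 input only changes the final Linear layer.
def _demo_backbone():  # hypothetical helper
    net = Backbone(input_size=112, num_layers=50).eval()
    with torch.no_grad():
        emb = net(torch.randn(1, 3, 112, 112))
    print(emb.shape, emb.norm(dim=1))  # torch.Size([1, 512]), ~1.0
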

def get_blocks(num_layers):
    """Return the per-stage bottleneck configuration for IR-SE-{50, 100, 152}."""
    if num_layers == 50:
        blocks = [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=4),
            get_block(in_channel=128, depth=256, num_units=14),
            get_block(in_channel=256, depth=512, num_units=3)
        ]
    elif num_layers == 100:
        blocks = [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=13),
            get_block(in_channel=128, depth=256, num_units=30),
            get_block(in_channel=256, depth=512, num_units=3)
        ]
    elif num_layers == 152:
        blocks = [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=8),
            get_block(in_channel=128, depth=256, num_units=36),
            get_block(in_channel=256, depth=512, num_units=3)
        ]
    else:
        raise ValueError("Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers))
    return blocks

class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
    """A named tuple describing a ResNet block."""


def get_block(in_channel, depth, num_units, stride=2):
    # the first unit downsamples (stride 2 by default); the rest keep resolution
    return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for _ in range(num_units - 1)]
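
# For example (illustrative), get_block(64, 128, num_units=4) yields
#   [Bottleneck(64, 128, 2), Bottleneck(128, 128, 1), Bottleneck(128, 128, 1), Bottleneck(128, 128, 1)]
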

def l2_norm(x, axis=1):
    """Normalize `x` to unit L2 norm along `axis`."""
    norm = torch.norm(x, 2, axis, True)
    return torch.div(x, norm)
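
# Worked example (illustrative): l2_norm(torch.tensor([[3.0, 4.0]])) divides by the
# row norm 5.0, giving tensor([[0.6000, 0.8000]]).
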

class SEModule(Module):
    """Squeeze-and-excitation: rescale channels by gates learned from globally pooled features."""

    def __init__(self, channels, reduction):
        super(SEModule, self).__init__()
        self.avg_pool = AdaptiveAvgPool2d(1)
        self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
        self.relu = ReLU(inplace=True)
        self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
        self.sigmoid = Sigmoid()

    def forward(self, x):
        module_input = x
        x = self.avg_pool(x)  # squeeze: global average per channel
        x = self.fc1(x)       # excite: bottleneck down by `reduction` ...
        x = self.relu(x)
        x = self.fc2(x)       # ... and back up to `channels`
        x = self.sigmoid(x)   # per-channel gate in (0, 1)
        return module_input * x
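
# Quick self-test (illustrative): running the module directly exercises the pieces
# that need no pretrained weights or downloads.
if __name__ == '__main__':
    se = SEModule(channels=64, reduction=16)
    print(se(torch.randn(2, 64, 32, 32)).shape)  # SE gating preserves shape: [2, 64, 32, 32]
    _demo_ms_ssim()
    _demo_backbone()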