Spaces:
Running
- align_faces.py +1 -1
- face_model/face_gan.py +1 -1
- face_model/model.py +1 -1
- retinaface/data/wider_face.py +1 -1
align_faces.py
CHANGED
@@ -36,7 +36,7 @@ def _umeyama(src, dst, estimate_scale=True, scale=1.0):
|
|
36 |
-------
|
37 |
T : (N + 1, N + 1)
|
38 |
The homogeneous similarity transformation matrix. The matrix contains
|
39 |
-
NaN values only if the problem
|
40 |
References
|
41 |
----------
|
42 |
.. [1] "Least-squares estimation of transformation parameters between two
|
|
|
36 |
-------
|
37 |
T : (N + 1, N + 1)
|
38 |
The homogeneous similarity transformation matrix. The matrix contains
|
39 |
+
NaN values only if the problem is not well-conditioned.
|
40 |
References
|
41 |
----------
|
42 |
.. [1] "Least-squares estimation of transformation parameters between two
|
face_model/face_gan.py
CHANGED
@@ -29,7 +29,7 @@ class FaceGAN(object):
|
|
29 |
else:
|
30 |
self.model = FullGenerator_SR(self.in_resolution, self.out_resolution, 512, self.n_mlp, channel_multiplier, narrow=narrow, device=self.device)
|
31 |
pretrained_dict = torch.load(self.mfile, map_location=torch.device('cpu'))
|
32 |
-
if self.key
|
33 |
self.model.load_state_dict(pretrained_dict)
|
34 |
self.model.to(self.device)
|
35 |
self.model.eval()
|
|
|
29 |
else:
|
30 |
self.model = FullGenerator_SR(self.in_resolution, self.out_resolution, 512, self.n_mlp, channel_multiplier, narrow=narrow, device=self.device)
|
31 |
pretrained_dict = torch.load(self.mfile, map_location=torch.device('cpu'))
|
32 |
+
if self.key is not None: pretrained_dict = pretrained_dict[self.key]
|
33 |
self.model.load_state_dict(pretrained_dict)
|
34 |
self.model.to(self.device)
|
35 |
self.model.eval()
|
face_model/model.py
CHANGED
@@ -370,7 +370,7 @@ class ToRGB(nn.Module):
|
|
370 |
out = self.conv(input, style)
|
371 |
out = out + self.bias
|
372 |
|
373 |
-
if skip
|
374 |
skip = self.upsample(skip)
|
375 |
|
376 |
out = out + skip
|
|
|
370 |
out = self.conv(input, style)
|
371 |
out = out + self.bias
|
372 |
|
373 |
+
if skip is not None:
|
374 |
skip = self.upsample(skip)
|
375 |
|
376 |
out = out + skip
|
retinaface/data/wider_face.py
CHANGED
@@ -71,7 +71,7 @@ class WiderFaceDetection(data.Dataset):
|
|
71 |
|
72 |
annotations = np.append(annotations, annotation, axis=0)
|
73 |
target = np.array(annotations)
|
74 |
-
if self.preproc
|
75 |
img, target = self.preproc(img, target)
|
76 |
|
77 |
return torch.from_numpy(img), target
|
|
|
71 |
|
72 |
annotations = np.append(annotations, annotation, axis=0)
|
73 |
target = np.array(annotations)
|
74 |
+
if self.preproc is not None:
|
75 |
img, target = self.preproc(img, target)
|
76 |
|
77 |
return torch.from_numpy(img), target
|