erwann committed on
Commit
82e6d22
1 Parent(s): cf17725
Files changed (3) hide show
  1. README.md +19 -16
  2. edit.py +2 -2
  3. img_processing.py +1 -22
README.md CHANGED
@@ -1,21 +1,24 @@
1
- ---
2
- title: Face Editor
3
- emoji: 🪞
4
- colorFrom: yellow
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.14.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
  # Face Editor
13
- This face editor uses a CelebA pretrained VQGAN with CLIP to allow prompt-based image manipulation, as well as slider based manipulation using extracted latent vectors.
14
 
15
- I've written a series of Medium articles which provide a detailed and beginner-friendly explanation of how this was built.
16
 
17
- ## Features:
18
- Edit masking using custom backpropagation hook
19
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
 
21
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
1
  # Face Editor
 
2
 
3
+ ## Examples
4
 
 
 
5
 
6
+ ## Overview
7
+ This interactive GUI face editor uses a CelebA-pretrained VQGAN-CLIP for prompt-based image manipulation, as well as slider based manipulation using extracted latent vectors.
8
+
9
+ I've written a series of Medium articles which provide a detailed and beginner-friendly explanation of how this was built and the intuition behind latent image manipulation.
10
+
11
+ [Coming Soon]
12
+
13
+ ## Demo
14
+ Clone the repo and run `app.py` or <a href="https://colab.research.google.com/drive/110uAZIRQjQen0rKqcnX_bqUXIahvRsm9?usp=sharing"> open this colab notebook </a> and run all the cells, then click on the link that appears under the final cell.
15
+
16
+ ## Features:
17
+ - Positive and negative prompts
18
+ - Multiple prompts
19
+ - Local editing using a gradient masked adversarial loss (implemented as custom pytorch backpropagation hooks). The CLIP loss gradients are masked according to the user's selection, and the LPIPS loss gradients are masked with the inverse of the user mask in order to preserve the initial identity of the face and prevent changes outside of the masked zone that otherwise happen due to latent variable entanglement.
20
+ - Extracted latent vectors for slider-based editing
21
+ - Rewinding through the history of edits, resuming edits from a previous point in the history
22
+ - Creating GIF animations of the editing process
23
 
24
+ ## Animations
edit.py CHANGED
@@ -55,8 +55,8 @@ if __name__ == "__main__":
55
  model.to(device)
56
  blend_paths(
57
  model,
58
- "./test_data/face.jpeg",
59
- "./test_data/face2.jpeg",
60
  quantize=False,
61
  weight=0.5,
62
  )
 
55
  model.to(device)
56
  blend_paths(
57
  model,
58
+ "./test_pics/face.jpeg",
59
+ "./test_pics/face2.jpeg",
60
  quantize=False,
61
  weight=0.5,
62
  )
img_processing.py CHANGED
@@ -9,13 +9,7 @@ import torchvision.transforms.functional as TF
9
  from PIL import Image, ImageDraw, ImageFont
10
 
11
 
12
- def download_image(url):
13
- resp = requests.get(url)
14
- resp.raise_for_status()
15
- return PIL.Image.open(io.BytesIO(resp.content))
16
-
17
-
18
- def preprocess(img, target_image_size=256, map_dalle=False):
19
  s = min(img.size)
20
 
21
  if s < target_image_size:
@@ -59,18 +53,3 @@ def loop_post_process(x):
59
  x = get_pil(x.squeeze())
60
  return x.permute(2, 0, 1).unsqueeze(0)
61
 
62
-
63
- def stack_reconstructions(input, x0, x1, x2, x3, titles=[]):
64
- assert input.size == x1.size == x2.size == x3.size
65
- w, h = input.size[0], input.size[1]
66
- img = Image.new("RGB", (5 * w, h))
67
- img.paste(input, (0, 0))
68
- img.paste(x0, (1 * w, 0))
69
- img.paste(x1, (2 * w, 0))
70
- img.paste(x2, (3 * w, 0))
71
- img.paste(x3, (4 * w, 0))
72
- for i, title in enumerate(titles):
73
- ImageDraw.Draw(img).text(
74
- (i * w, 0), f"{title}", (255, 255, 255), font=font
75
- ) # coordinates, text, color, font
76
- return img
 
9
  from PIL import Image, ImageDraw, ImageFont
10
 
11
 
12
+ def preprocess(img, target_image_size=256):
 
 
 
 
 
 
13
  s = min(img.size)
14
 
15
  if s < target_image_size:
 
53
  x = get_pil(x.squeeze())
54
  return x.permute(2, 0, 1).unsqueeze(0)
55