younesbelkada committed
Commit b260763
1 Parent(s): 8279921

add new files

app.py CHANGED
@@ -9,43 +9,8 @@ from yarg import get
 from models.stylegan_generator import StyleGANGenerator
 from models.stylegan2_generator import StyleGAN2Generator
 
-VALID_CHOICES = [
-    "Bald",
-    "Young",
-    "Mustache",
-    "Eyeglasses",
-    "Hat",
-    "Smiling"
-]
-ENABLE_GPU = False
-MODEL_NAMES = [
-    'stylegan_ffhq',
-    'stylegan2_ffhq'
-]
-NB_IMG = 4
-OUTPUT_LIST = [
-    gr.outputs.Image(type="pil", label="Generated Image") for _ in range(NB_IMG)
-] + [
-    gr.outputs.Image(type="pil", label="Modified Image") for _ in range(NB_IMG)
-]
-description = """
-<p>
-    <center>
-    This is an interactive demo of the CVPR2020 InterfaceGAN paper, by adding other attributes such as Hat, Bald, etc. />
-    </center>
-</p>
-"""
-
-def tensor_to_pil(input_object):
-    """Shows images in one figure."""
-    if isinstance(input_object, dict):
-        im_array = []
-        images = input_object['image']
-    else:
-        images = input_object
-    for _, image in enumerate(images):
-        im_array.append(PIL.Image.fromarray(image))
-    return im_array
+from utils.constants import VALID_CHOICES, ENABLE_GPU, MODEL_NAME, OUTPUT_LIST, description
+from utils.image_manip import tensor_to_pil, concat_images
 
 def get_generator(model_name):
     if model_name == 'stylegan_ffhq':
@@ -58,12 +23,16 @@ def get_generator(model_name):
         generator = generator.cuda()
     return generator
 
+generator = get_generator(MODEL_NAME)
+boundaries = {
+    boundary: np.squeeze(np.load(open(os.path.join('boundaries', MODEL_NAME, 'boundary_%s.npy' % boundary), 'rb')))
+    for boundary in VALID_CHOICES
+}
+
 @torch.no_grad()
-def inference(seed, choice, model_name, coef, nb_images=NB_IMG):
+def inference(seed, coef, nb_images, list_choices):
+    global generator, boundaries
     np.random.seed(seed)
-
-    boundary = np.squeeze(np.load(open(os.path.join('boundaries', model_name, 'boundary_%s.npy' % choice), 'rb')))
-    generator = get_generator(model_name)
     latent_codes = generator.easy_sample(nb_images)
     if ENABLE_GPU:
         latent_codes = latent_codes.cuda()
@@ -73,13 +42,15 @@ def inference(seed, choice, model_name, coef, nb_images=NB_IMG):
 
     new_latent_codes = latent_codes.copy()
     for i, _ in enumerate(generated_images):
-        new_latent_codes[i, :] += boundary*coef
+        for choice in list_choices:
+            new_latent_codes[i, :] += boundaries[choice]*coef
 
     modified_generated_images = generator.easy_synthesize(new_latent_codes)
     modified_generated_images = tensor_to_pil(modified_generated_images)
 
-    return generated_images + modified_generated_images
-
+    concatenated_output = concat_images(generated_images, modified_generated_images)
+
+    return concatenated_output
 
 iface = gr.Interface(
     fn=inference,
@@ -88,19 +59,9 @@ iface = gr.Interface(
         minimum=0,
         maximum=1000,
         step=1,
-        default=264,
+        default=644,
         label="Random seed to use for the generation"
     ),
-    gr.inputs.Dropdown(
-        choices=VALID_CHOICES,
-        type="value",
-        label="Attribute to modify",
-    ),
-    gr.inputs.Dropdown(
-        choices=MODEL_NAMES,
-        type="value",
-        label="Model to use",
-    ),
     gr.inputs.Slider(
         minimum=-3,
         maximum=3,
@@ -108,6 +69,14 @@ iface = gr.Interface(
         default=0,
         label="Modification coefficient",
     ),
+    gr.inputs.Slider(
+        minimum=1,
+        maximum=10,
+        step=1,
+        default=2,
+        label="Number of images to generate",
+    ),
+    gr.inputs.CheckboxGroup(VALID_CHOICES, default=[], type="value", label=None, optional=False)
     ],
     outputs=OUTPUT_LIST,
     layout="horizontal",
boundaries/stylegan_ffhq/boundary_Beard.npy ADDED
Binary file (2.18 kB).
 
boundaries/stylegan_ffhq/boundary_Gender.npy ADDED
Binary file (2.18 kB).
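
Per InterfaceGAN, each `boundary_<attr>.npy` holds the normal vector of a separating hyperplane in the generator's latent space; 2.18 kB is consistent with a 512-dim float32 vector plus the .npy header, though the exact dimension is an assumption. A hedged loading sketch mirroring the diff:

```python
import os
import numpy as np

# Load one attribute boundary the same way app.py does; squeeze drops the
# leading batch axis, leaving a (latent_dim,) vector.
path = os.path.join('boundaries', 'stylegan_ffhq', 'boundary_Beard.npy')
boundary = np.squeeze(np.load(path))
print(boundary.shape)  # e.g. (512,) -- exact dim depends on the model
```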
 
utils/constants.py ADDED
@@ -0,0 +1,25 @@
+import gradio as gr
+
+VALID_CHOICES = [
+    "Bald",
+    "Young",
+    "Mustache",
+    "Eyeglasses",
+    "Hat",
+    "Smiling",
+    "Gender",
+    "Beard"
+]
+ENABLE_GPU = False
+MODEL_NAME = "stylegan_ffhq"
+OUTPUT_LIST = [
+    gr.outputs.Image(type="pil", label="Generated Images"),
+    gr.outputs.Image(type="pil", label="Modified Images"),
+]
+description = """
+<p>
+    <center>
+    This is an interactive demo of the CVPR 2020 InterfaceGAN paper, extended with additional attributes such as Hat, Bald, etc.
+    </center>
+</p>
+"""
utils/image_manip.py ADDED
@@ -0,0 +1,19 @@
+import numpy as np
+import PIL.Image
+
+def concat_images(generated_images, modified_generated_images):
+    """Concatenates each list of images horizontally into a single row image."""
+    concatenated_array_generated_images = np.concatenate([np.array(image) for image in generated_images], axis=1)
+    concatenated_array_modified_generated_images = np.concatenate([np.array(image) for image in modified_generated_images], axis=1)
+
+    return [PIL.Image.fromarray(concatenated_array_generated_images), PIL.Image.fromarray(concatenated_array_modified_generated_images)]
+
+def tensor_to_pil(input_object):
+    im_array = []
+    if isinstance(input_object, dict):
+        images = input_object['image']
+    else:
+        images = input_object
+    for image in images:
+        im_array.append(PIL.Image.fromarray(image))
+    return im_array
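
A quick usage sketch for these helpers with dummy arrays (image count and shapes are illustrative only, not taken from the models):

```python
import numpy as np
from utils.image_manip import concat_images, tensor_to_pil

# Two fake 64x64 RGB frames per batch, as uint8 arrays.
generated = [np.zeros((64, 64, 3), dtype=np.uint8) for _ in range(2)]
modified = [np.full((64, 64, 3), 255, dtype=np.uint8) for _ in range(2)]

# tensor_to_pil converts the raw arrays; concat_images builds one row each.
row_generated, row_modified = concat_images(
    tensor_to_pil(generated), tensor_to_pil(modified)
)
print(row_generated.size)  # (128, 64): two 64x64 images side by side
```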