ethanNeuralImage committed
Commit 63d4a4c
1 Parent(s): 005fbee

app changes

Files changed (2)
  1. .gitignore +1 -0
  2. app.py +36 -35
.gitignore ADDED
@@ -0,0 +1 @@
+__pycache__/
app.py CHANGED
@@ -40,38 +40,26 @@ mapper_dict = {
     'wavy':'./pretrained_models/styleCLIP_mappers/wavy_hairstyle.pt'
 }
 
+mapper_descs = {
+    'afro':'A face with an afro',
+    'bob':'A face with a bob-cut hairstyle',
+    'bowl':'A face with a bowl cut hairstyle',
+    'buzz':'A face with a buzz cut hairstyle',
+    'caesar':'A face with a caesar cut hairstyle',
+    'crew':'A face with a crew cut hairstyle',
+    'pixie':'A face with a pixie cut hairstyle',
+    'straight':'A face with a straight hair hairstyle',
+    'undercut':'A face with a undercut hairstyle',
+    'wavy':'A face with a wavy hair hairstyle',
+}
+
+
 predictor = dlib.shape_predictor("./pretrained_models/hyperstyle/shape_predictor_68_face_landmarks.dat")
 hyperstyle, hyperstyle_args = load_model(opts.hyperstyle_checkpoint_path, update_opts=opts)
 resize_amount = (256, 256) if hyperstyle_args.resize_outputs else (hyperstyle_args.output_size, hyperstyle_args.output_size)
 im2tensor_transforms = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
 direction_calculator = load_direction_calculator(opts)
 
-ckpt = torch.load(mapper_dict['afro'], map_location='cpu')
-opts.checkpoint_path = mapper_dict['afro']
-mapper_args = ckpt['opts']
-mapper_args.update(vars(opts))
-mapper_args = Namespace(**mapper_args)
-mapper = StyleCLIPMapper(mapper_args)
-mapper.eval()
-mapper.cuda()
-
-def change_mapper(desc):
-    global mapper
-    global mapper_args
-    mapper = None
-    ckpt = None
-    mapper_args = None
-    torch.cuda.empty_cache()
-    opts.checkpoint_path = mapper_dict[desc]
-    ckpt = torch.load(mapper_dict[desc], map_location='cpu')
-    mapper_args = ckpt['opts']
-    mapper_args.update(vars(opts))
-    mapper_args = Namespace(**mapper_args)
-    mapper = StyleCLIPMapper(mapper_args)
-    mapper.eval()
-    mapper.cuda()
-
-
 
 with gr.Blocks() as demo:
     with gr.Row() as row:
@@ -79,17 +67,17 @@ with gr.Blocks() as demo:
             source = gr.Image(label="Image to Map", type='filepath')
             align = gr.Checkbox(True, label='Align Image')
             inverter_bools = gr.CheckboxGroup(["Hyperstyle", "E4E"], value=['Hyperstyle'], label='Inverter Choices')
-            n_hyperstyle_iterations = gr.Number(3, label='Number of Iterations For Hyperstyle', precision=0)
+            n_hyperstyle_iterations = gr.Number(5, label='Number of Iterations For Hyperstyle', precision=0)
             with gr.Box():
                 mapper_bool = gr.Checkbox(True, label='Output Mapper Result')
                 with gr.Box() as mapper_opts:
-                    mapper_choice = gr.Dropdown(['afro', 'bob', 'bowl', 'buzz', 'caesar', 'crew', 'pixie', 'straight', 'undercut', 'wavy'], value='afro', label='What Hairstyle Mapper to Use?')
-                    mapper_alpha = gr.Slider(minimum=-0.5, maximum=0.5, value=0.01, step=0.1, label='Strength of Mapper Alpha',)
+                    mapper_choice = gr.Dropdown(list(mapper_dict.keys()), value='afro', label='What Hairstyle Mapper to Use?')
+                    mapper_alpha = gr.Slider(minimum=-0.5, maximum=0.5, value=0.1, step=0.01, label='Strength of Mapper Alpha',)
             with gr.Box():
                 gd_bool = gr.Checkbox(False, label='Output Global Direction Result')
                 with gr.Box(visible=False) as gd_opts:
                     neutral_text = gr.Text(value='A face with hair', label='Neutral Text')
-                    target_text = gr.Text(value=mapper_args.description, label='Target Text')
+                    target_text = gr.Text(value=mapper_descs['afro'], label='Target Text')
                     alpha = gr.Slider(minimum=-10.0, maximum=10.0, value=4.1, step=0.1, label="Alpha for Global Direction")
                     beta = gr.Slider(minimum=0.0, maximum=0.30, value=0.15, step=0.01, label="Beta for Global Direction")
             submit_button = gr.Button("Edit Image")
@@ -100,9 +88,13 @@ with gr.Blocks() as demo:
         with gr.Row(visible=False) as e4e_images:
             output_e4e_mapper = gr.Image(type='pil', label="E4E Mapper")
             output_e4e_gd = gr.Image(type='pil', label="E4E Global Directions", visible=False)
+    def n_iter_change(number):
+        if number < 0:
+            return 0
+        else:
+            return number
     def mapper_change(new_mapper):
-        change_mapper(new_mapper)
-        return mapper_args.description
+        return mapper_descs[new_mapper]
     def inverter_toggles(bools):
         e4e_bool = 'E4E' in bools
         hyperstyle_bool = 'Hyperstyle' in bools
@@ -125,11 +117,12 @@ with gr.Blocks() as demo:
             output_e4e_gd: gr.update(visible=bool)
         }
 
+    n_hyperstyle_iterations.change(n_iter_change, n_hyperstyle_iterations, n_hyperstyle_iterations)
     mapper_choice.change(mapper_change, mapper_choice, [target_text])
     inverter_bools.change(inverter_toggles, inverter_bools, [hyperstyle_images, e4e_images, n_hyperstyle_iterations])
     mapper_bool.change(mapper_toggles, mapper_bool, [mapper_opts, output_hyperstyle_mapper, output_e4e_mapper])
     gd_bool.change(gd_toggles, gd_bool, [gd_opts, output_hyperstyle_gd, output_e4e_gd])
-    def map_latent(inputs, stylespace=False, weight_deltas=None, strength=0.1):
+    def map_latent(mapper, inputs, stylespace=False, weight_deltas=None, strength=0.1):
         w = inputs.cuda()
         with torch.no_grad():
             if stylespace:
@@ -150,6 +143,14 @@ with gr.Blocks() as demo:
         gd_bool, neutral_text, target_text, alpha, beta,
     ):
         torch.cuda.empty_cache()
+        opts.checkpoint_path = mapper_dict[mapper_choice]
+        ckpt = torch.load(mapper_dict[mapper_choice], map_location='cpu')
+        mapper_args = ckpt['opts']
+        mapper_args.update(vars(opts))
+        mapper_args = Namespace(**mapper_args)
+        mapper = StyleCLIPMapper(mapper_args)
+        mapper.eval()
+        mapper.cuda()
         with torch.no_grad():
             output_imgs = []
             if align_img:
@@ -167,7 +168,7 @@ with gr.Blocks() as demo:
             if 'Hyperstyle' in inverter_bools:
                 hyperstyle_batch, hyperstyle_latents, hyperstyle_deltas, _ = run_inversion(input_img.unsqueeze(0), hyperstyle, hyperstyle_args, return_intermediate_results=False)
                 if mapper_bool:
-                    mapped_hyperstyle, _ = map_latent(hyperstyle_latents, stylespace=False, weight_deltas=hyperstyle_deltas, strength=mapper_alpha)
+                    mapped_hyperstyle, _ = map_latent(mapper, hyperstyle_latents, stylespace=False, weight_deltas=hyperstyle_deltas, strength=mapper_alpha)
                     mapped_hyperstyle = tensor2im(mapped_hyperstyle[0])
                 else:
                     mapped_hyperstyle = None
@@ -186,7 +187,7 @@ with gr.Blocks() as demo:
                 e4e_batch, e4e_latents = hyperstyle.w_invert(input_img.unsqueeze(0))
                 e4e_deltas = None
                 if mapper_bool:
-                    mapped_e4e, _ = map_latent(e4e_latents, stylespace=False, weight_deltas=e4e_deltas, strength=mapper_alpha)
+                    mapped_e4e, _ = map_latent(mapper, e4e_latents, stylespace=False, weight_deltas=e4e_deltas, strength=mapper_alpha)
                     mapped_e4e = tensor2im(mapped_e4e[0])
                 else:
                     mapped_e4e = None
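
The commit's net effect is to drop the module-level mapper (and the change_mapper() global-state swap) and instead build the selected StyleCLIP mapper inside edit_image. Below is a minimal sketch of that per-request loading pattern with the repeated block factored into a helper; load_mapper is a hypothetical name, and StyleCLIPMapper, opts, and mapper_dict refer to objects already defined in app.py, not new API.

```python
from argparse import Namespace

import torch


def load_mapper(opts, mapper_dict, choice):
    """Build the StyleCLIP mapper selected for the current request.

    Hypothetical helper mirroring the block the commit adds at the top of
    edit_image; StyleCLIPMapper comes from app.py's existing imports.
    """
    opts.checkpoint_path = mapper_dict[choice]
    ckpt = torch.load(mapper_dict[choice], map_location='cpu')  # weights land on CPU first
    mapper_args = ckpt['opts']        # options dict stored in the checkpoint at training time
    mapper_args.update(vars(opts))    # runtime options override the saved ones
    mapper = StyleCLIPMapper(Namespace(**mapper_args))
    mapper.eval()                     # inference only
    return mapper.cuda()              # generation runs on the GPU
```

With this shape, the block added to edit_image reduces to mapper = load_mapper(opts, mapper_dict, mapper_choice). The trade-off is one checkpoint load on every call, but it removes the mutable globals that change_mapper() relied on, so the selected hairstyle and the loaded weights cannot drift apart between UI events.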