jamino30 committed on
Commit 246dd82
Parent: e9e9628

Upload folder using huggingface_hub

Files changed (2):
  1. app.py +4 -7
  2. inference.py +9 -7
app.py CHANGED

@@ -88,7 +88,8 @@ with gr.Blocks(css=css) as demo:
     with gr.Column(elem_id='container'):
         content_and_output = gr.Image(label='Content', show_label=False, type='pil', sources=['upload', 'webcam', 'clipboard'], format='jpg', show_download_button=False)
         style_dropdown = gr.Radio(choices=list(style_options.keys()), label='Style', info='Note: Adjustments automatically optimize for different styles.', value='Starry Night', type='value')
-        with gr.Accordion('Adjustments', open=False):
+
+        with gr.Accordion('Adjustments', open=True):
             with gr.Group():
                 style_strength_slider = gr.Slider(label='Style Strength', minimum=1, maximum=100, step=1, value=50)
 
@@ -138,12 +139,8 @@ with gr.Blocks(css=css) as demo:
     )
 
     examples = gr.Examples(
-        examples=[
-            ['./content_images/Bridge.jpg', 'Starry Night', *optimal_settings['Starry Night']],
-            ['./content_images/GoldenRetriever.jpg', 'Lego Bricks', *optimal_settings['Lego Bricks']],
-            ['./content_images/SeaTurtle.jpg', 'Oil Painting', *optimal_settings['Oil Painting']],
-            ['./content_images/NYCSkyline.jpg', 'Scream', *optimal_settings['Scream']]
-        ],
+        label='Example',
+        examples=[['./content_images/Bridge.jpg', 'Starry Night', 100, False]],
         inputs=[content_and_output, style_dropdown, style_strength_slider, output_quality]
     )
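The app.py change opens the Adjustments accordion by default and trims the example gallery to a single hard-coded row (`100, False` replaces the unpacked `optimal_settings['Starry Night']` values). Since `gr.Examples` fills its `inputs` components positionally from each example row, a minimal runnable sketch of the pattern looks like the following; the stub components, and the checkbox standing in for `output_quality`, are assumptions for illustration, not taken from app.py:

```python
# Minimal sketch of the gr.Examples pattern above; component wiring is
# illustrative only (the real app connects these to its inference function).
import gradio as gr

with gr.Blocks() as demo:
    content = gr.Image(label='Content', type='pil')
    style = gr.Radio(choices=['Starry Night'], value='Starry Night', label='Style')
    with gr.Accordion('Adjustments', open=True):  # now open by default
        strength = gr.Slider(label='Style Strength', minimum=1, maximum=100, step=1, value=50)
    quality = gr.Checkbox(label='Output Quality', value=False)  # assumed stand-in for output_quality

    # Each example row fills [content, style, strength, quality] positionally.
    gr.Examples(
        label='Example',
        examples=[['./content_images/Bridge.jpg', 'Starry Night', 100, False]],  # path exists in the Space's repo
        inputs=[content, style, strength, quality],
    )

demo.launch()
```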
inference.py CHANGED

@@ -26,23 +26,25 @@ def inference(
     content_image,
     style_features,
     lr,
-    iterations=35,
+    iterations=3,
+    optim_caller=optim.LBFGS,
     alpha=1,
     beta=1
 ):
     generated_image = content_image.clone().requires_grad_(True)
-    optimizer = optim.AdamW([generated_image], lr=lr)
+    optimizer = optim_caller([generated_image], lr=lr)
 
     with torch.no_grad():
         content_features = model(content_image)
-
-    for _ in tqdm(range(iterations), desc='The magic is happening ✨'):
+
+    def closure():
         optimizer.zero_grad()
-
         generated_features = model(generated_image)
         total_loss = _compute_loss(generated_features, content_features, style_features, alpha, beta)
-
         total_loss.backward()
-        optimizer.step()
+        return total_loss
+
+    for _ in tqdm(range(iterations), desc='The magic is happening ✨'):
+        optimizer.step(closure)
 
     return generated_image
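The inference.py change swaps AdamW for L-BFGS, which is why a closure appears: PyTorch's `optim.LBFGS` re-invokes the supplied closure on every inner iteration of a single `.step()` (up to `max_iter=20` by default), so the loss computation and `backward()` must live inside it, and far fewer outer iterations are needed (`iterations=3` versus the old 35). A self-contained sketch of the same pattern on a toy objective; the names here are illustrative, not from inference.py:

```python
# Toy demonstration of the L-BFGS closure pattern adopted above.
import torch
from torch import optim

target = torch.randn(8)
x = torch.zeros(8, requires_grad=True)  # plays the role of generated_image
optimizer = optim.LBFGS([x], lr=1.0)

def closure():
    # L-BFGS calls this repeatedly within a single .step(), so the loss
    # and backward() must be recomputed here rather than in the loop.
    optimizer.zero_grad()
    loss = ((x - target) ** 2).sum()
    loss.backward()
    return loss

for _ in range(3):  # mirrors iterations=3 in the new signature
    optimizer.step(closure)

print(torch.allclose(x, target, atol=1e-4))  # True: the quadratic is solved
```

The new `optim_caller` parameter keeps the old behaviour reachable: every PyTorch optimizer's `.step()` accepts an optional closure, so passing `optim_caller=optim.AdamW` still works; it simply evaluates the closure once per step.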