Update app.py
app.py CHANGED
@@ -69,14 +69,15 @@ def on_change_event(app_state):
         step = app_state['step']
         label = f'Reconstructed image from the latent state at step {step}. It will get better :)'
         print(f'Updating the image:! {app_state}')
-        return gr.update(value=img, label=label)
+        return gr.update(value=img, label=label), gr.update(value=app_state['img_list'], label='intermediate steps')
     else:
-        return gr.update(label='Illustration will appear here soon')
+        return gr.update(label='Illustration will appear here soon'), gr.update(label='images list')
 
 with gr.Blocks() as demo:
 
     def generate_image(prompt, inference_steps, app_state):
         app_state['running'] = True
+        app_state['img_list'] = []
         def callback(step, ts, latents):
             print (f'In Callback on {step} {ts} !')
             latents = 1 / 0.18215 * latents
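The hunk above widens on_change_event from one return value to two, because the handler now drives both the image and the new gallery. In Gradio Blocks a handler returns one value per component in its outputs list, in the same order. Below is a minimal sketch of that rule, assuming Gradio 3.x and using illustrative names (show_progress, state, img, gal) rather than the app's own:

import gradio as gr

def show_progress(state):
    # Two components are wired as outputs below, so exactly two values are
    # returned, in the same order as outputs=[img, gal].
    if state.get('img') is not None:
        return gr.update(value=state['img']), gr.update(value=state['img_list'])
    return gr.update(label='Illustration will appear here soon'), gr.update(label='images list')

with gr.Blocks() as demo:
    state = gr.State({'img': None, 'img_list': []})
    img = gr.Image()
    gal = gr.Gallery()
    demo.load(show_progress, state, [img, gal])

demo.launch()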
@@ -86,17 +87,20 @@ with gr.Blocks() as demo:
             res = pipe.numpy_to_pil(res)[0]
             app_state['img'] = res
             app_state['step'] = step
+            app_state['img_list'].append(res)
             print (f'In Callback on {app_state} Done!')
 
         prompt = prompt + ' masterpiece charcoal pencil art lord of the rings illustration'
-        img = pipe(prompt, height=512, width=512, num_inference_steps=inference_steps, callback=callback, callback_steps=
+        img = pipe(prompt, height=512, width=512, num_inference_steps=inference_steps, callback=callback, callback_steps=1)
         app_state['running'] = False
         app_state['img'] = None
         return gr.update(value=img.images[0], label='Generated image')
 
     app_state = gr.State({'img': None,
                           'step':0,
-                          'running':False
+                          'running':False,
+                          'img_list': []
+                          })
     title = gr.Markdown('## Lord of the rings app')
     description = gr.Markdown(f'#### A Lord of the rings inspired app that combines text and image generation.'
                               f' The language modeling is done by fine tuning distilgpt2 on the LOTR trilogy.'
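The callback registered with callback_steps=1 receives the current step, timestep, and latent tensor on every denoising step; the lines elided between the two hunks (old lines 83 to 85) presumably decode those latents with the VAE before pipe.numpy_to_pil is called. The following is a common recipe for that decode, shown as a hedged sketch rather than the file's exact code; it assumes a diffusers StableDiffusionPipeline bound to pipe and the app_state dict from the surrounding scope, as in the diff:

import torch

def callback(step, ts, latents):
    # Undo the VAE scaling factor used for Stable Diffusion v1.x latents.
    latents = 1 / 0.18215 * latents
    with torch.no_grad():
        image = pipe.vae.decode(latents).sample   # latent space -> pixel space
    image = (image / 2 + 0.5).clamp(0, 1)         # [-1, 1] -> [0, 1]
    res = image.cpu().permute(0, 2, 3, 1).float().numpy()
    res = pipe.numpy_to_pil(res)[0]               # first image of the batch as PIL
    app_state['img'] = res                        # polled by on_change_event
    app_state['step'] = step
    app_state['img_list'].append(res)             # feeds the new gallery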
@@ -112,23 +116,26 @@ with gr.Blocks() as demo:
                               ' but here you can see here what is generated from the latent state of the diffuser every few steps.'
                               ' Usually there is a significant improvement around step 12 that yields much better result')
     image = gr.Image(label='Illustration for your story', show_label=True)
+    gallery = gr.Gallery()
+    gallery.style(grid=[4])
 
     inference_steps = gr.Slider(5, 30,
                                 value=20,
                                 step=1,
                                 visible=True,
-                                label=f"Num inference steps (more steps
+                                label=f"Num inference steps (more steps yields a better image but takes more time)")
 
 
     bt_make_text.click(fn=generate_story, inputs=prompt, outputs=[story, summary, bt_make_image])
     bt_make_image.click(fn=generate_image, inputs=[summary, inference_steps, app_state], outputs=image)
 
     eventslider = gr.Slider(visible=False)
-    dep = demo.load(on_change_event, app_state, image, every=10)
-    eventslider.change(fn=on_change_event, inputs=[app_state], outputs=[image], every=10, cancels=[dep])
+    dep = demo.load(on_change_event, app_state, [image, gallery], every=10)
+    eventslider.change(fn=on_change_event, inputs=[app_state], outputs=[image, gallery], every=10, cancels=[dep])
 
 
 if READ_TOKEN:
     demo.queue().launch()
 else:
     demo.queue().launch(share=True, debug=True)
+
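The wiring in the last hunk relies on Gradio's recurring events: demo.load(..., every=10) re-runs on_change_event every 10 seconds once the page has loaded and returns an event handle, and passing that handle to cancels= from another event can stop the original polling loop, which is what eventslider.change does here; every= depends on the queue, which is why the app launches with demo.queue(). Here is a stripped-down sketch of the same pattern, assuming Gradio 3.x and using hypothetical names (tick, image):

import time
import gradio as gr

def tick():
    # Runs every `every` seconds after the page loads.
    return gr.update(label=f"refreshed at {time.strftime('%H:%M:%S')}")

with gr.Blocks() as demo:
    image = gr.Image(label='waiting for the first refresh...')
    # demo.load returns an event handle; another event could stop this loop
    # by passing cancels=[dep], as the diff does with eventslider.change.
    dep = demo.load(tick, None, image, every=10)

demo.queue().launch()   # every= requires the queue to be enabled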