mikegarts committed
Commit ea8b702
Parent: bb6c48a

Update app.py

Files changed (1):
  1. app.py +18 -16
app.py CHANGED
@@ -1,5 +1,6 @@
 import time
 import os
+import PIL
 import gradio as gr
 
 import torch
@@ -19,7 +20,7 @@ if has_cuda:
     pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, revision="fp16", use_auth_token=READ_TOKEN)
     device = "cuda"
 else:
-    pipe = StableDiffusionPipeline.from_pretrained(model_id, revision="fp16", use_auth_token=READ_TOKEN)
+    pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=READ_TOKEN)
     device = "cpu"
 
 pipe.to(device)
@@ -33,6 +34,8 @@ tokenizer = AutoTokenizer.from_pretrained(SAVED_CHECKPOINT)
 
 summarizer = pipeline("summarization")
 
+#######################################################
+
 def break_until_dot(txt):
     return txt.rsplit('.', 1)[0] + '.'
 
@@ -42,7 +45,8 @@ def generate(prompt):
 
     outputs = model.generate(
         input_ids=input_ids,
-        max_length=180,
+        max_length=120,
+        min_length=50,
         temperature=0.7,
         num_return_sequences=3,
         do_sample=True
@@ -57,22 +61,22 @@ def generate_story(prompt):
     summary = break_until_dot(summary)
     return story, summary, gr.update(visible=True)
 
-def on_change_event(app_state=None):
-    print(f'In change event!')
-    if app_state and app_state['running']:
+def on_change_event(app_state):
+    if app_state and app_state['running'] and app_state['img']:
         img = app_state['img']
         step = app_state['step']
         label = f'Reconstructed image from the latent state at step {step}'
+        print(f'Updating the image:! {app_state}')
         return gr.update(value=img, label=label)
     else:
-        return None
+        return gr.update()
 
 with gr.Blocks() as demo:
 
     def generate_image(prompt, inference_steps, app_state):
         app_state['running'] = True
         def callback(step, ts, latents):
-            print (f'In Callback on {step}!')
+            print (f'In Callback on {step} {ts} !')
             latents = 1 / 0.18215 * latents
             res = pipe.vae.decode(latents).sample
             res = (res / 2 + 0.5).clamp(0, 1)
@@ -80,9 +84,10 @@ with gr.Blocks() as demo:
             res = pipe.numpy_to_pil(res)[0]
             app_state['img'] = res
             app_state['step'] = step
+            print (f'In Callback on {app_state} Done!')
 
         prompt = prompt + ' masterpiece charcoal pencil art lord of the rings illustration'
-        img = pipe(prompt, height=512, width=512, num_inference_steps=inference_steps, callback=callback, callback_steps=5)
+        img = pipe(prompt, height=512, width=512, num_inference_steps=inference_steps, callback=callback, callback_steps=2)
         app_state['running'] = False
         return gr.update(value=img.images[0], label='Generated image')
 
@@ -103,7 +108,7 @@ with gr.Blocks() as demo:
     img_description = gr.Markdown('Image generation take some time'
                                   ' but here you can see the what is generated from the latent state of the diffuser every few steps.'
                                   ' Usually there is a significant improvement around step 15, that yields much better result')
-    image = gr.Image(label='Illustration for your story', shape=(512, 512), show_label=True)
+    image = gr.Image(label='Illustration for your story', show_label=True)
 
     inference_steps = gr.Slider(5, 30,
                                 value=15,
@@ -115,15 +120,12 @@ with gr.Blocks() as demo:
     bt_make_text.click(fn=generate_story, inputs=prompt, outputs=[story, summary, bt_make_image])
     bt_make_image.click(fn=generate_image, inputs=[summary, inference_steps, app_state], outputs=image)
 
-    # bt_boo = gr.Button("Click me")
-    # bt_boo.click(fn=on_change_event, inputs=app_state, outputs=image, every=1)
-    # eventslider = gr.Slider(label='Boooo!')
-    # dep = demo.load(on_change_event, None, None, every=1)
-    # eventslider.change(fn=on_change_event, inputs=[app_state], outputs=[image], every=1, cancels=[dep])
-    inference_steps.change(fn=on_change_event, inputs=[app_state], outputs=[image], every=1)
+    eventslider = gr.Slider(visible=False)
+    dep = demo.load(on_change_event, app_state, image, every=10)
+    eventslider.change(fn=on_change_event, inputs=[app_state], outputs=[image], every=10, cancels=[dep])
 
 
 if READ_TOKEN:
     demo.queue().launch()
 else:
-    demo.queue().launch(share=True, debug=True)
+    demo.queue().launch(share=True, debug=True)
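
On the pipeline-loading fix: the "fp16" revision hosts half-precision weights intended for GPU inference, and most CPU ops lack float16 kernels, so the CPU branch now loads the default full-precision weights instead. A minimal sketch of the pattern (model_id here is an illustrative stand-in; app.py defines its own, plus use_auth_token):

    import torch
    from diffusers import StableDiffusionPipeline

    model_id = "CompVis/stable-diffusion-v1-4"  # illustrative stand-in

    if torch.cuda.is_available():
        # GPU: half-precision weights halve memory use and speed up inference.
        pipe = StableDiffusionPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, revision="fp16")
        device = "cuda"
    else:
        # CPU: load full-precision weights; float16 is poorly supported
        # on CPU, which is what this commit works around.
        pipe = StableDiffusionPipeline.from_pretrained(model_id)
        device = "cpu"

    pipe.to(device)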
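
On the callback change: callback= / callback_steps= are the diffusers hooks (in the versions this app targets) for observing intermediate denoising steps, and 0.18215 is Stable Diffusion's VAE latent scaling factor, so dividing by it before pipe.vae.decode recovers a decodable latent. A standalone sketch of the same preview trick, assuming a loaded StableDiffusionPipeline named pipe (helper names are illustrative):

    import torch

    def latents_to_preview(pipe, latents):
        # Undo SD's latent scaling factor, then decode with the VAE.
        with torch.no_grad():
            images = pipe.vae.decode(latents / 0.18215).sample
        # Map from [-1, 1] to [0, 1], move to HWC numpy, convert to PIL.
        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).float().numpy()
        return pipe.numpy_to_pil(images)

    def preview_callback(step, timestep, latents):
        # Matches the callback(step, ts, latents) signature used above.
        latents_to_preview(pipe, latents)[0].save(f"preview_{step:03d}.png")

    # image = pipe(prompt, callback=preview_callback, callback_steps=2).images[0]

Lowering callback_steps from 5 to 2, as the commit does, simply refreshes the preview more often at the cost of extra VAE decodes.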
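
On the event wiring: the commented-out experiments are replaced with a working poller. demo.load(fn, inputs, outputs, every=10) re-runs fn every 10 seconds while the page is open (this requires demo.queue()); on_change_event returns a bare gr.update() to leave the image untouched when nothing new is available; and the hidden eventslider's change handler with cancels=[dep] gives the app a way to stop the polling job. A minimal self-contained sketch of the pattern in the same Gradio 3.x style (fake_job and the state keys are illustrative; app_state in app.py is presumably a gr.State defined outside this diff's hunks):

    import time
    import numpy as np
    import gradio as gr

    with gr.Blocks() as demo:
        # Shared per-session dict; handlers mutate it in place and the
        # poller reads it, just like app_state in app.py.
        app_state = gr.State({'running': False, 'img': None, 'step': 0})

        start = gr.Button('Start fake job')
        image = gr.Image(label='Live preview')

        def fake_job(state):
            # Stand-in for generate_image: update shared state as it runs.
            state['running'] = True
            for step in range(5):
                time.sleep(2)
                state['img'] = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
                state['step'] = step
            state['running'] = False
            return gr.update(value=state['img'], label='Done')

        def poll(state):
            # A bare gr.update() leaves the output component unchanged.
            if state and state['running'] and state['img'] is not None:
                return gr.update(value=state['img'], label=f"Step {state['step']}")
            return gr.update()

        start.click(fake_job, inputs=app_state, outputs=image)
        demo.load(poll, inputs=app_state, outputs=image, every=10)

    demo.queue().launch()

Depending on the Gradio version, the queue may need more than one worker (e.g. demo.queue(concurrency_count=2)) for the poll to fire while a generation is still running.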