wenjiao committed on
Commit
35493cf
1 Parent(s): 1d38fa1
Files changed (1)
  1. app.py +14 -7
app.py CHANGED
@@ -89,14 +89,20 @@ def txt2img_generate(prompt, steps=25, seed=42, guidance_scale=7.5):
         print("Compute node: ", json.loads(resp.text)["ip"])
     except:
         print('No inference result. Please check server connection')
-        return None
+        return [None, gr.update(visible=True), gr.update(visible=False)]
 
     img_byte = base64.b64decode(img_str)
     img_io = BytesIO(img_byte)  # convert image to file-like object
     img = Image.open(img_io)  # img is now PIL Image object
     print("elapsed time: ", time.time() - start_time)
 
-    return img
+    if queue_size.isdigit():
+        if int(queue_size) > 4:
+            return [img, gr.update(visible=False), gr.update(visible=True)]
+        elif int(queue_size) <= 4:
+            return [img, gr.update(visible=True), gr.update(visible=False)]
+    else:
+        return [gr.update(visible=True), gr.update(visible=False)]
 
 
 md = """
@@ -120,8 +126,8 @@ css = '''
 .duplicate-button img{margin: 0}
 #mdStyle{font-size: 0.6rem}
 .generating.svelte-1w9161c { border: none }
-#txtGreenStyle {2px solid #32ec48;}
-#txtOrangeStyle {2px solid #e77718;}
+#txtGreenStyle {border: 2px solid #32ec48;}
+#txtOrangeStyle {border: 2px solid #e77718;}
 '''
 
 random_seed = random.randint(0, 2147483647)
@@ -130,7 +136,8 @@ with gr.Blocks(css=css) as demo:
     gr.Markdown("# Stable Diffusion Inference Demo on 4th Gen Intel Xeon Scalable Processors")
     gr.Markdown(md)
 
-    gr.Textbox(set_msg, every=3, label='Real-time Jobs in Queue', elem_id='txtOrangeStyle')
+    textBoxGreen = gr.Textbox(set_msg, every=3, label='Real-time Jobs in Queue', elem_id='txtGreenStyle', visible=True)
+    textBoxOrange = gr.Textbox(set_msg, every=3, label='Real-time Jobs in Queue', elem_id='txtOrangeStyle', visible=False)
 
     with gr.Tab("Text-to-Image"):
         with gr.Row(visible=True) as text_to_image:
@@ -160,9 +167,9 @@
             with gr.Column():
                 result_image_2 = gr.Image()
 
-    txt2img_button.click(fn=txt2img_generate, inputs=[prompt, inference_steps, seed, guidance_scale], outputs=[result_image])
+    txt2img_button.click(fn=txt2img_generate, inputs=[prompt, inference_steps, seed, guidance_scale], outputs=[result_image, textBoxGreen, textBoxOrange])
 
-    img2img_button.click(fn=img2img_generate, inputs=[source_img, prompt_2, inference_steps_2, strength, seed_2, guidance_scale_2], outputs=result_image_2)
+    img2img_button.click(fn=img2img_generate, inputs=[source_img, prompt_2, inference_steps_2, strength, seed_2, guidance_scale_2], outputs=[result_image_2, textBoxGreen, textBoxOrange])
 
     gr.Markdown("**Additional Test Configuration Details:**", elem_id='mdStyle')
     gr.Markdown(details, elem_id='mdStyle')
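
For context, the commit makes each generate handler return gr.update(visible=...) values for two extra Textbox outputs, so the demo can swap a green-bordered queue indicator for an orange one when the job queue grows. The following is a minimal, self-contained sketch of that pattern only, not the demo's actual code: set_msg and queue_size live in app.py and are not shown in this diff, so the stand-ins poll_queue() and generate() below are assumptions for illustration.

# Minimal sketch of the visibility-toggle pattern used in this commit (Gradio Blocks).
# poll_queue() and generate() are hypothetical stand-ins; the real app reads the
# queue size and the generated image from an inference server.
import random
import gradio as gr
from PIL import Image

def poll_queue():
    # Stand-in for set_msg: return the current number of queued jobs as a string.
    return str(random.randint(0, 8))

def generate(prompt):
    img = Image.new("RGB", (64, 64))  # stand-in for the image returned by the backend
    queue_size = poll_queue()
    if queue_size.isdigit() and int(queue_size) > 4:
        # Busy: hide the green indicator, show the orange one.
        return [img, gr.update(visible=False), gr.update(visible=True)]
    # Otherwise show the green indicator, hide the orange one.
    return [img, gr.update(visible=True), gr.update(visible=False)]

css = '''
#txtGreenStyle {border: 2px solid #32ec48;}
#txtOrangeStyle {border: 2px solid #e77718;}
'''

with gr.Blocks(css=css) as demo:
    # every=3 re-runs poll_queue every 3 seconds; it requires the queue to be enabled.
    box_green = gr.Textbox(poll_queue, every=3, label='Real-time Jobs in Queue',
                           elem_id='txtGreenStyle', visible=True)
    box_orange = gr.Textbox(poll_queue, every=3, label='Real-time Jobs in Queue',
                            elem_id='txtOrangeStyle', visible=False)
    prompt = gr.Textbox(label='Prompt')
    result = gr.Image()
    btn = gr.Button('Generate')
    # Listing both textboxes as outputs lets the handler's gr.update() values
    # flip their visibility on every click, mirroring the click wiring in the diff above.
    btn.click(fn=generate, inputs=[prompt], outputs=[result, box_green, box_orange])

demo.queue().launch()

Note that the sketch returns three values on every path so the list length always matches the three declared outputs.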