Commit 458eb67 · verified · bedead committed · 1 Parent(s): b6a8c5f

Update app.py

Files changed (1)
  1. app.py +7 -14
app.py CHANGED
@@ -175,7 +175,7 @@ def run(*args):
 
     # Clear GPU memory cache so less likely to OOM
     torch.cuda.empty_cache()
-    return ims, output_conds
+    return ims
 
 
 def change_visible(im1, im2, val):
@@ -191,15 +191,8 @@ def change_visible(im1, im2, val):
     outputs[im2] = gr.update(visible=True)
     return outputs
 
-
-DESCRIPTION = '# [CoAdapter (Composable Adapter)](https://github.com/TencentARC/T2I-Adapter)'
-
-DESCRIPTION += f'<p>Gradio demo for **CoAdapter**: [[GitHub]](https://github.com/TencentARC/T2I-Adapter), [[Details]](https://github.com/TencentARC/T2I-Adapter/blob/main/docs/coadapter.md). If CoAdapter is helpful, please help to ⭐ the [Github Repo](https://github.com/TencentARC/T2I-Adapter) and recommend it to your friends 😊 </p>'
-
-DESCRIPTION += f'<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/Adapter/T2I-Adapter?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
 # with gr.Blocks(title="CoAdapter", css=".gr-box {border-color: #8136e2}") as demo:
 with gr.Blocks(css='style.css') as demo:
-    gr.Markdown(DESCRIPTION)
 
     btns = []
     ims1 = []
@@ -238,10 +231,10 @@ with gr.Blocks(css='style.css') as demo:
         cond_weights.append(cond_weight)
 
     with gr.Column():
-        prompt = gr.Textbox(label="Prompt")
-        neg_prompt = gr.Textbox(label="Negative Prompt", value=DEFAULT_NEGATIVE_PROMPT)
+        prompt = gr.Textbox(label="Prompt", visible=False)
+        neg_prompt = gr.Textbox(visible=False, label="Negative Prompt", value=DEFAULT_NEGATIVE_PROMPT)
         scale = gr.Slider(label="Guidance Scale (Classifier free guidance)", value=7.5, minimum=1, maximum=20, step=0.1)
-        n_samples = gr.Slider(label="Num samples", value=1, minimum=1, maximum=1, step=1)
+        n_samples = gr.Slider(label="Num samples", value=1, minimum=1, maximum=3, step=1)
         seed = gr.Slider(label="Seed", value=42, minimum=0, maximum=10000, step=1)
         steps = gr.Slider(label="Steps", value=50, minimum=10, maximum=100, step=1)
         resize_short_edge = gr.Slider(label="Image resolution", value=512, minimum=320, maximum=1024, step=1)
@@ -255,10 +248,10 @@ with gr.Blocks(css='style.css') as demo:
     with gr.Row():
        submit = gr.Button("Generate")
        output = gr.Gallery(rows=2, height='auto')
-        cond = gr.Gallery(rows=2, height='auto')
+        # cond = gr.Gallery(rows=2, height='auto')
 
     inps = list(chain(btns, ims1, ims2, cond_weights))
     inps.extend([prompt, neg_prompt, scale, n_samples, seed, steps, resize_short_edge, cond_tau])
-    submit.click(fn=run, inputs=inps, outputs=[output, cond])
+    submit.click(fn=run, inputs=inps, outputs=output)
     # demo.launch()
-demo.launch(debug=True, share=True)
+demo.launch(debug=True, share=True)
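
For context, here is a minimal, self-contained sketch of the wiring this commit ends up with: run() now returns a single list of images, and submit.click targets only the output gallery. The run body below is a placeholder (no diffusion model, random pixels instead), and the component set is trimmed to the controls that appear in the diff; everything else in the real app.py is omitted.

import numpy as np
import gradio as gr

DEFAULT_NEGATIVE_PROMPT = ''  # placeholder; the Space defines its own default value

def run(prompt, neg_prompt, scale, n_samples, seed, steps):
    # Placeholder for the Space's generation loop: returns only a list of images,
    # matching the new single-value return of run() in this commit.
    rng = np.random.default_rng(int(seed))
    return [rng.integers(0, 255, size=(64, 64, 3), dtype=np.uint8)
            for _ in range(int(n_samples))]

with gr.Blocks() as demo:  # the real app also passes css='style.css'
    prompt = gr.Textbox(label="Prompt", visible=False)
    neg_prompt = gr.Textbox(visible=False, label="Negative Prompt", value=DEFAULT_NEGATIVE_PROMPT)
    scale = gr.Slider(label="Guidance Scale (Classifier free guidance)", value=7.5, minimum=1, maximum=20, step=0.1)
    n_samples = gr.Slider(label="Num samples", value=1, minimum=1, maximum=3, step=1)
    seed = gr.Slider(label="Seed", value=42, minimum=0, maximum=10000, step=1)
    steps = gr.Slider(label="Steps", value=50, minimum=10, maximum=100, step=1)
    submit = gr.Button("Generate")
    output = gr.Gallery(rows=2, height='auto')
    # Only one output component now, so run() must return a single value (the image list).
    submit.click(fn=run, inputs=[prompt, neg_prompt, scale, n_samples, seed, steps], outputs=output)

demo.launch(debug=True, share=True)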