multimodalart HF staff committed on
Commit
9e6481f
1 Parent(s): f51d066

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -17,7 +17,7 @@ if not torch.cuda.is_available():
17
  DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
18
 
19
  MAX_SEED = np.iinfo(np.int32).max
20
- CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "1") == "1"
21
  MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1024"))
22
  USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "1") == "1"
23
  ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
@@ -239,7 +239,7 @@ with gr.Blocks(css="style.css") as demo:
239
  gr.Examples(
240
  examples=examples,
241
  inputs=prompt,
242
- outputs=result,
243
  fn=generate,
244
  cache_examples=CACHE_EXAMPLES,
245
  )
 
17
  DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
18
 
19
  MAX_SEED = np.iinfo(np.int32).max
20
+ CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "0") == "1"
21
  MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1024"))
22
  USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "1") == "1"
23
  ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
 
239
  gr.Examples(
240
  examples=examples,
241
  inputs=prompt,
242
+ outputs=[result, seed],
243
  fn=generate,
244
  cache_examples=CACHE_EXAMPLES,
245
  )