ChenWu98 committed
Commit f9410ef • Parent: e15af30

Update app.py

Files changed (1): app.py (+28 -20)
app.py CHANGED
```diff
@@ -16,19 +16,20 @@ is_colab = utils.is_google_colab()
 colab_instruction = "" if is_colab else """
 <p>You can skip the queue using Colab: <a href="https://colab.research.google.com/gist/ChenWu98/0aa4fe7be80f6b45d3d055df9f14353a/copy-of-fine-tuned-diffusion-gradio.ipynb"><img data-canonical-src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab" src="https://colab.research.google.com/assets/colab-badge.svg"></a></p>"""
 
-model_id_or_path = "CompVis/stable-diffusion-v1-4"
-if is_colab:
-    scheduler = DDIMScheduler.from_config(model_id_or_path, subfolder="scheduler")
-    pipe = CycleDiffusionPipeline.from_pretrained(model_id_or_path, scheduler=scheduler)
-else:
-    import streamlit as st
-    scheduler = DDIMScheduler.from_config(model_id_or_path, use_auth_token=st.secrets["USER_TOKEN"], subfolder="scheduler")
-    torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
-    pipe = CycleDiffusionPipeline.from_pretrained(model_id_or_path, use_auth_token=st.secrets["USER_TOKEN"], scheduler=scheduler, torch_dtype=torch_dtype)
-tokenizer = pipe.tokenizer
-
-if torch.cuda.is_available():
-    pipe = pipe.to("cuda")
+if True:
+    model_id_or_path = "CompVis/stable-diffusion-v1-4"
+    if is_colab:
+        scheduler = DDIMScheduler.from_config(model_id_or_path, subfolder="scheduler")
+        pipe = CycleDiffusionPipeline.from_pretrained(model_id_or_path, scheduler=scheduler)
+    else:
+        import streamlit as st
+        scheduler = DDIMScheduler.from_config(model_id_or_path, use_auth_token=st.secrets["USER_TOKEN"], subfolder="scheduler")
+        torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+        pipe = CycleDiffusionPipeline.from_pretrained(model_id_or_path, use_auth_token=st.secrets["USER_TOKEN"], scheduler=scheduler, torch_dtype=torch_dtype)
+    tokenizer = pipe.tokenizer
+
+    if torch.cuda.is_available():
+        pipe = pipe.to("cuda")
 
 device_print = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
 device = "cuda" if torch.cuda.is_available() else "cpu"
```
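The only change in this hunk is wrapping the loading block in `if True:` and re-indenting it; behavior is unchanged. Stripped of the Colab/Spaces branching and the `st.secrets` auth token, the loading path reduces to the sketch below, assuming a diffusers release that ships `CycleDiffusionPipeline` and still accepts a Hub id plus `subfolder` in `DDIMScheduler.from_config`, as this file does:

```python
# Minimal sketch of the loading path above, without the Colab/Spaces branching.
# Assumption: a diffusers version exposing CycleDiffusionPipeline and accepting
# a Hub id + subfolder in DDIMScheduler.from_config, as app.py does here.
import torch
from diffusers import CycleDiffusionPipeline, DDIMScheduler

model_id_or_path = "CompVis/stable-diffusion-v1-4"

# CycleDiffusionPipeline requires a DDIM scheduler, loaded from the same repo.
scheduler = DDIMScheduler.from_config(model_id_or_path, subfolder="scheduler")

# Use fp16 only when a GPU is available, mirroring the non-Colab branch.
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
pipe = CycleDiffusionPipeline.from_pretrained(
    model_id_or_path, scheduler=scheduler, torch_dtype=torch_dtype
)
tokenizer = pipe.tokenizer

if torch.cuda.is_available():
    pipe = pipe.to("cuda")

# Example call (argument names per the diffusers CycleDiffusion docs; illustrative):
# images = pipe(prompt="target prompt", source_prompt="source prompt",
#               image=init_image, strength=0.8, guidance_scale=2).images
```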
```diff
@@ -294,8 +295,19 @@ with gr.Blocks(css=css) as demo:
 <p>
 <b>Quick start</b>: <br>
 1. Click one row of Examples at the end of this page. It will fill all inputs needed. <br>
-2. Click the "Edit" button. <br>
+2. Click the "Run CycleDiffusion" button. <br>
 </p>
+<p>
+{colab_instruction}
+Running on <b>{device_print}</b>{(" in a <b>Google Colab</b>." if is_colab else "")}
+</p>
+</div>
+"""
+)
+with gr.Accordion("See Details", open=False):
+    gr.HTML(
+        f"""
+        <div class="cycle-diffusion-div">
 <p>
 <b>How to use:</b> <br>
 1. Upload an image. <br>
@@ -304,7 +316,7 @@ with gr.Blocks(css=css) as demo:
 4. Select the strength (smaller strength means better content preservation). <br>
 5 (optional). Configurate Cross Attention Control options (e.g., CAC type, cross replace steps, self replace steps). <br>
 6 (optional). Configurate other options (e.g., image size, inference steps, random seed). <br>
-7. Click the "Edit" button. <br>
+7. Click the "Run CycleDiffusion" button. <br>
 </p>
 <p>
 <b>Notes:</b> <br>
@@ -318,13 +330,9 @@ with gr.Blocks(css=css) as demo:
 1. 30s on A10G. <br>
 2. 90s on T4. <br>
 </p>
-<p>
-{colab_instruction}
-Running on <b>{device_print}</b>{(" in a <b>Google Colab</b>." if is_colab else "")}
-</p>
 </div>
 """
-)
+    )
 with gr.Row():
 
     with gr.Column(scale=55):
```
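The remaining hunks rename the action button from "Edit" to "Run CycleDiffusion", move the Colab/device note into the always-visible header, and fold the long "How to use" / "Notes" text into a `gr.Accordion("See Details", open=False)` block. A minimal sketch of that collapsed-details pattern (the HTML text is illustrative, not the Space's exact markup):

```python
# Minimal sketch of the collapsed-details layout introduced in this commit.
# The HTML strings are illustrative placeholders, not copied from the Space.
import gradio as gr

with gr.Blocks() as demo:
    # Short intro stays visible at the top of the page.
    gr.HTML(
        '<div class="cycle-diffusion-div">'
        '<p><b>Quick start</b>: pick an Examples row, then click "Run CycleDiffusion".</p>'
        "</div>"
    )

    # Longer instructions live in an accordion that starts collapsed.
    with gr.Accordion("See Details", open=False):
        gr.HTML(
            '<div class="cycle-diffusion-div">'
            "<p><b>How to use:</b> upload an image, set the source/target prompts and "
            'the strength, then click "Run CycleDiffusion".</p>'
            "</div>"
        )

if __name__ == "__main__":
    demo.launch()
```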