Minor enhancements

#11
by msqrd - opened
Files changed (1)
  1. app.py +17 -16
app.py CHANGED
@@ -1,8 +1,4 @@
-import os
-import random
-import uuid
-import json
-
+import os, random, uuid, json
 import gradio as gr
 import numpy as np
 from PIL import Image
@@ -10,8 +6,9 @@ import spaces
 import torch
 from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
 
+DESCRIPTION = None
 if not torch.cuda.is_available():
-    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo may not work on CPU.</p>"
+    DESCRIPTION = "\nRunning on CPU 🥶 This demo may not work on CPU."
 
 MAX_SEED = np.iinfo(np.int32).max
 CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "1") == "1"
@@ -21,15 +18,18 @@ ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
+pipe = StableDiffusionXLPipeline.from_pretrained(
+    "sd-community/sdxl-flash",
+    torch_dtype=torch.float16,
+    use_safetensors=True,
+    add_watermarker=False
+)
+pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+
 if torch.cuda.is_available():
-    pipe = StableDiffusionXLPipeline.from_pretrained(
-        "sd-community/sdxl-flash",
-        torch_dtype=torch.float16,
-        use_safetensors=True,
-        add_watermarker=False
-    )
-    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
     pipe.to("cuda")
+else:
+    pipe.to("cpu")
 
 def save_image(img):
     unique_name = str(uuid.uuid4()) + ".png"
@@ -90,14 +90,15 @@ examples = [
 
 css = '''
 .gradio-container{max-width: 700px !important}
-h1{text-align:center}
+h1{text-align:left}
 footer {
   visibility: hidden
 }
 '''
 with gr.Blocks(css=css) as demo:
-    gr.Markdown("""# SDXL Flash
-        ### First Image processing takes time then images generate faster.""")
+    gr.Markdown(f"""# SDXL Flash
+        ### First Image processing takes time then images generate faster.
+        {DESCRIPTION}""")
     with gr.Group():
         with gr.Row():
             prompt = gr.Text(
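
For context, a minimal sketch of the net effect of this change, not the exact file contents: the pipeline is now constructed once regardless of hardware and only the target device differs, and the CPU notice is folded into the Markdown header. Two labeled adjustments: DESCRIPTION starts as an empty string here (the diff initializes it to None, which the f-string header would render literally as "None" on GPU machines), and the css/layout details are omitted.

```python
import torch
import gradio as gr
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler

# Assumption: empty string instead of None so the header stays clean when CUDA is available.
DESCRIPTION = ""
if not torch.cuda.is_available():
    DESCRIPTION = "\nRunning on CPU 🥶 This demo may not work on CPU."

# Pipeline is built unconditionally, as in the patch; only the device placement branches.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "sd-community/sdxl-flash",
    torch_dtype=torch.float16,
    use_safetensors=True,
    add_watermarker=False,
)
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda" if torch.cuda.is_available() else "cpu")

with gr.Blocks() as demo:
    # The CPU notice is appended to the header via the f-string.
    gr.Markdown(f"""# SDXL Flash
    ### First Image processing takes time then images generate faster.
    {DESCRIPTION}""")
```

The diff keeps torch_dtype=torch.float16 on both devices; a float16 pipeline can still fail at inference time on CPU-only machines, so switching to torch.float32 in the CPU branch may be worth considering.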