kadirnar committed
Commit c7c3db7 · verified · 1 Parent(s): df81eb7

Update app.py

Files changed (1)
  1. app.py +86 -96
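
At a glance, the change swaps the single-file SDXL checkpoint (kadirnar/Black-Hole tachyon.safetensors with a DPMSolverSinglestepScheduler) for the stock playgroundai/playground-v2.5-1024px-aesthetic pipeline, drops the num_images/BATCH_SIZE batching, and fixes inference at 25 steps. A minimal standalone sketch of the new loading path, assuming a CUDA machine and a recent diffusers (>= 0.27); the prompt and output filename are illustrative:

# Hedged sketch of the pipeline this commit adopts; mirrors the diff below.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "playgroundai/playground-v2.5-1024px-aesthetic",
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
).to("cuda")

image = pipe(
    "a cat eating a piece of cheese",  # one of the Space's example prompts
    guidance_scale=3.0,                # the UI default in the diff
    num_inference_steps=25,            # hard-coded in the new generate()
).images[0]
image.save("cheese_cat.png")           # illustrative filename
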
app.py CHANGED
@@ -1,113 +1,120 @@
 
+#!/usr/bin/env python
 
 import os
 import random
 import uuid
-import json
 
 import gradio as gr
 import numpy as np
 from PIL import Image
 import spaces
 import torch
-from diffusers import StableDiffusionXLPipeline, DPMSolverSinglestepScheduler
+from diffusers import DiffusionPipeline
 
-# Use environment variables for flexibility
-MODEL_ID = os.getenv("MODEL_ID", "sd-community/sdxl-flash")
-MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
+DESCRIPTION = """# Playground v2.5"""
+if not torch.cuda.is_available():
+    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo may not work on CPU.</p>"
+
+MAX_SEED = np.iinfo(np.int32).max
+CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "1") == "1"
+MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1536"))
 USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
-BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))  # Allow generating multiple images at once
 
-# Determine device and load model outside of function for efficiency
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-pipe = StableDiffusionXLPipeline.from_single_file(
-    "https://huggingface.co/kadirnar/Black-Hole/blob/main/tachyon.safetensors",
-    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
-    use_safetensors=True,
-    add_watermarker=False,
-).to(device)
-pipe.scheduler = DPMSolverSinglestepScheduler(use_karras_sigmas=True).from_config(pipe.scheduler.config)
-
-# Torch compile for potential speedup (experimental)
-if USE_TORCH_COMPILE:
-    pipe.compile()
-
-# CPU offloading for larger RAM capacity (experimental)
-if ENABLE_CPU_OFFLOAD:
-    pipe.enable_model_cpu_offload()
 
-MAX_SEED = np.iinfo(np.int32).max
+NUM_IMAGES_PER_PROMPT = 1
+
+if torch.cuda.is_available():
+    pipe = DiffusionPipeline.from_pretrained(
+        "playgroundai/playground-v2.5-1024px-aesthetic",
+        torch_dtype=torch.float16,
+        use_safetensors=True,
+        add_watermarker=False,
+        variant="fp16"
+    )
+    if ENABLE_CPU_OFFLOAD:
+        pipe.enable_model_cpu_offload()
+    else:
+        pipe.to(device)
+        print("Loaded on Device!")
+
+    if USE_TORCH_COMPILE:
+        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+        print("Model Compiled!")
+
+
+def save_image(img):
+    unique_name = str(uuid.uuid4()) + ".png"
+    img.save(unique_name)
+    return unique_name
+
 
 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     return seed
 
-@spaces.GPU()
+
+@spaces.GPU(enable_queue=True)
 def generate(
     prompt: str,
     negative_prompt: str = "",
     use_negative_prompt: bool = False,
-    seed: int = 1,
+    seed: int = 0,
     width: int = 1024,
     height: int = 1024,
     guidance_scale: float = 3,
-    num_inference_steps: int = 30,
     randomize_seed: bool = False,
-    use_resolution_binning: bool = True,
-    num_images: int = 1,  # Number of images to generate
+    use_resolution_binning: bool = True,
    progress=gr.Progress(track_tqdm=True),
 ):
+    pipe.to(device)
     seed = int(randomize_seed_fn(seed, randomize_seed))
-    generator = torch.Generator(device=device).manual_seed(seed)
-
-    # Improved options handling
-    options = {
-        "prompt": [prompt] * num_images,
-        "negative_prompt": [negative_prompt] * num_images if use_negative_prompt else None,
-        "width": width,
-        "height": height,
-        "guidance_scale": guidance_scale,
-        "num_inference_steps": num_inference_steps,
-        "generator": generator,
-        "output_type": "pil",
-    }
-
-    # Use resolution binning for faster generation with less VRAM usage
-    if use_resolution_binning:
-        options["use_resolution_binning"] = True
-
-    # Generate images potentially in batches
-    images = []
-    for i in range(0, num_images, BATCH_SIZE):
-        batch_options = options.copy()
-        batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
-        if "negative_prompt" in batch_options:
-            batch_options["negative_prompt"] = options["negative_prompt"][i:i+BATCH_SIZE]
-        images.extend(pipe(**batch_options).images)
-
-    return images
+    generator = torch.Generator().manual_seed(seed)
+
+    if not use_negative_prompt:
+        negative_prompt = None  # type: ignore
+
+    images = pipe(
+        prompt=prompt,
+        negative_prompt=negative_prompt,
+        width=width,
+        height=height,
+        guidance_scale=guidance_scale,
+        num_inference_steps=25,
+        generator=generator,
+        num_images_per_prompt=NUM_IMAGES_PER_PROMPT,
+        use_resolution_binning=use_resolution_binning,
+        output_type="pil",
+    ).images
+
+    image_paths = [save_image(img) for img in images]
+    print(image_paths)
+    return image_paths, seed
+
 
 examples = [
+    "neon holography crystal cat",
     "a cat eating a piece of cheese",
-    "a ROBOT riding a BLUE horse on Mars, photorealistic, 4k",
-    "Ironman VS Hulk, ultrarealistic",
-    "Astronaut in a jungle, cold color palette, oil pastel, detailed, 8k",
-    "An alien holding a sign board containing the word 'Flash', futuristic, neonpunk",
-    "Kids going to school, Anime style"
+    "an astronaut riding a horse in space",
+    "a cartoon of a boy playing with a tiger",
+    "a cute robot artist painting on an easel, concept art",
+    "a close up of a woman wearing a transparent, prismatic, elaborate nemeses headdress, over the should pose, brown skin-tone"
 ]
 
 css = '''
-.gradio-container{max-width: 700px !important}
+.gradio-container{max-width: 560px !important}
 h1{text-align:center}
-footer {
-    visibility: hidden
-}
 '''
-
 with gr.Blocks(css=css) as demo:
-    gr.Markdown("""# Black Hole SDXL-Lightning""")
+    gr.Markdown(DESCRIPTION)
+    gr.DuplicateButton(
+        value="Duplicate Space for private use",
+        elem_id="duplicate-button",
+        visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
+    )
     with gr.Group():
         with gr.Row():
             prompt = gr.Text(
@@ -118,24 +125,14 @@ with gr.Blocks(css=css) as demo:
                 container=False,
             )
             run_button = gr.Button("Run", scale=0)
-        result = gr.Gallery(elem_id="gallery", label="Result", show_label=False)
-
+        result = gr.Gallery(label="Result", columns=NUM_IMAGES_PER_PROMPT, show_label=False)
     with gr.Accordion("Advanced options", open=False):
-        num_images = gr.Slider(
-            label="Number of Images",
-            minimum=1,
-            maximum=4,
-            step=1,
-            value=1,
-        )
         with gr.Row():
-            use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
+            use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
             negative_prompt = gr.Text(
                 label="Negative prompt",
-                max_lines=5,
-                lines=4,
+                max_lines=1,
                 placeholder="Enter a negative prompt",
-                value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, NSFW",
                 visible=True,
             )
         seed = gr.Slider(
@@ -149,38 +146,33 @@ with gr.Blocks(css=css) as demo:
         with gr.Row(visible=True):
             width = gr.Slider(
                 label="Width",
-                minimum=512,
+                minimum=256,
                 maximum=MAX_IMAGE_SIZE,
-                step=64,
+                step=32,
                 value=1024,
             )
             height = gr.Slider(
                 label="Height",
-                minimum=512,
+                minimum=256,
                 maximum=MAX_IMAGE_SIZE,
-                step=64,
+                step=32,
                 value=1024,
             )
         with gr.Row():
             guidance_scale = gr.Slider(
                 label="Guidance Scale",
                 minimum=0.1,
-                maximum=6,
+                maximum=20,
                 step=0.1,
                 value=3.0,
             )
-            num_inference_steps = gr.Slider(
-                label="Number of inference steps",
-                minimum=1,
-                maximum=15,
-                step=1,
-                value=4,
-            )
 
     gr.Examples(
         examples=examples,
         inputs=prompt,
-        cache_examples=False
+        outputs=[result, seed],
+        fn=generate,
+        cache_examples=CACHE_EXAMPLES,
    )
 
     use_negative_prompt.change(
@@ -205,13 +197,11 @@ with gr.Blocks(css=css) as demo:
             width,
             height,
             guidance_scale,
-            num_inference_steps,
             randomize_seed,
-            num_images
         ],
         outputs=[result, seed],
         api_name="run",
     )
 
 if __name__ == "__main__":
-    demo.queue().launch()
+    demo.queue(max_size=20).launch()
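
Because the click handler keeps api_name="run", the updated endpoint can still be called programmatically. A hedged sketch using gradio_client, assuming the Space is public and deployed; the Space id is a placeholder, and the positional inputs mirror the generate() signature above:

from gradio_client import Client  # pip install gradio_client

client = Client("user/space-id")  # placeholder: substitute the actual Space id
gallery, seed = client.predict(
    "an astronaut riding a horse in space",  # prompt
    "",      # negative_prompt
    False,   # use_negative_prompt
    0,       # seed
    1024,    # width
    1024,    # height
    3.0,     # guidance_scale
    True,    # randomize_seed
    api_name="/run",
)
print(seed, gallery)  # gallery is a list of generated image files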