KingNish committed
Commit b6a9837
Parent: 87a083e

Update app.py

Files changed (1): app.py (+54 -64)
app.py CHANGED
@@ -10,16 +10,25 @@ import spaces
 import torch
 from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
 
+def save_image(img):
+    unique_name = str(uuid.uuid4()) + ".png"
+    img.save(unique_name)
+    return unique_name
+
+def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
+    if randomize_seed:
+        seed = random.randint(0, MAX_SEED)
+    return seed
+
+MAX_SEED = np.iinfo(np.int32).max
+
 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo may not work on CPU.</p>"
 
 MAX_SEED = np.iinfo(np.int32).max
-CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "1") == "1"
-MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
-USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
-ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
 
-device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+USE_TORCH_COMPILE = 0
+ENABLE_CPU_OFFLOAD = 0
 
 if torch.cuda.is_available():
     pipe = StableDiffusionXLPipeline.from_pretrained(
@@ -29,23 +38,14 @@ if torch.cuda.is_available():
         add_watermarker=False
     )
     pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+
+
     pipe.load_lora_weights("ehristoforu/dalle-3-xl-v2", weight_name="dalle-3-xl-lora-v2.safetensors", adapter_name="dalle")
     pipe.set_adapters("dalle")
 
     pipe.to("cuda")
 
-
-def save_image(img):
-    unique_name = str(uuid.uuid4()) + ".png"
-    img.save(unique_name)
-    return unique_name
-
-def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-    return seed
-
-@spaces.GPU(duration=30, queue=False)
+@spaces.GPU(enable_queue=True)
 def generate(
     prompt: str,
     negative_prompt: str = "",
@@ -54,32 +54,30 @@ def generate(
     width: int = 1024,
     height: int = 1024,
     guidance_scale: float = 3,
-    num_inference_steps: int = 10,
     randomize_seed: bool = False,
-    use_resolution_binning: bool = True,
     progress=gr.Progress(track_tqdm=True),
 ):
-    pipe.to(device)
-    seed = int(randomize_seed_fn(seed, randomize_seed))
-    generator = torch.Generator().manual_seed(seed)
-
-    options = {
-        "prompt":prompt,
-        "negative_prompt":negative_prompt,
-        "width":width,
-        "height":height,
-        "guidance_scale":guidance_scale,
-        "num_inference_steps":num_inference_steps,
-        "generator":generator,
-        "use_resolution_binning":use_resolution_binning,
-        "output_type":"pil",
-
-    }
 
-    images = pipe(**options).images
+    seed = int(randomize_seed_fn(seed, randomize_seed))
 
+    if not use_negative_prompt:
+        negative_prompt = ""  # type: ignore
+
+    images = pipe(
+        prompt=prompt,
+        negative_prompt=negative_prompt,
+        width=width,
+        height=height,
+        guidance_scale=guidance_scale,
+        num_inference_steps=25,
+        num_images_per_prompt=1,
+        cross_attention_kwargs={"scale": 0.65},
+        output_type="pil",
+    ).images
     image_paths = [save_image(img) for img in images]
+    print(image_paths)
     return image_paths, seed
+
 
 
 examples = [
@@ -112,55 +110,48 @@ with gr.Blocks(css=css) as demo:
                 container=False,
             )
             run_button = gr.Button("Run", scale=0)
-    result = gr.Gallery(label="Result", columns=1)
+    result = gr.Gallery(label="Result", columns=1, preview=True, show_label=False)
     with gr.Accordion("Advanced options", open=False):
-        with gr.Row():
-            use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
-            negative_prompt = gr.Text(
-                label="Negative prompt",
-                max_lines=5,
-                lines=4,
-                placeholder="Enter a negative prompt",
-                value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, NSFW",
-                visible=True,
-            )
+        use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
+        negative_prompt = gr.Text(
+            label="Negative prompt",
+            lines=4,
+            max_lines=6,
+            value="""(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, (NSFW:1.25)""",
+            placeholder="Enter a negative prompt",
+            visible=True,
+        )
         seed = gr.Slider(
             label="Seed",
             minimum=0,
             maximum=MAX_SEED,
             step=1,
             value=0,
+            visible=True
         )
         randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
         with gr.Row(visible=True):
             width = gr.Slider(
                 label="Width",
                 minimum=512,
-                maximum=MAX_IMAGE_SIZE,
-                step=64,
+                maximum=2048,
+                step=8,
                 value=1024,
             )
             height = gr.Slider(
                 label="Height",
                 minimum=512,
-                maximum=MAX_IMAGE_SIZE,
-                step=64,
+                maximum=2048,
+                step=8,
                 value=1024,
             )
         with gr.Row():
             guidance_scale = gr.Slider(
                 label="Guidance Scale",
                 minimum=0.1,
-                maximum=6,
+                maximum=20.0,
                 step=0.1,
-                value=3.0,
-            )
-            num_inference_steps = gr.Slider(
-                label="Number of inference steps",
-                minimum=1,
-                maximum=15,
-                step=1,
-                value=8,
+                value=6,
             )
 
     gr.Examples(
@@ -168,7 +159,7 @@ with gr.Blocks(css=css) as demo:
         inputs=prompt,
         outputs=[result, seed],
         fn=generate,
-        cache_examples=CACHE_EXAMPLES,
+        cache_examples=False,
     )
 
     use_negative_prompt.change(
@@ -193,12 +184,11 @@ with gr.Blocks(css=css) as demo:
             width,
             height,
             guidance_scale,
-            num_inference_steps,
             randomize_seed,
         ],
         outputs=[result, seed],
-        api_name="run",
+        api_name="run"
     )
 
 if __name__ == "__main__":
-    demo.queue(max_size=20).launch()
+    demo.queue(max_size=20).launch(show_api=False, debug=False)
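
For reference, the inference path the updated app.py converges on can be reproduced outside Gradio. The sketch below is a reconstruction under stated assumptions, not code from the commit: BASE_MODEL is a placeholder (the diff truncates the from_pretrained call) and torch_dtype is assumed, while the scheduler, LoRA repository, adapter name, and pipeline arguments are taken verbatim from the hunks above. It assumes a CUDA GPU and a diffusers build with PEFT-backed LoRA support.

import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler

# Placeholder id: the diff does not show which base checkpoint the Space loads.
BASE_MODEL = "stabilityai/stable-diffusion-xl-base-1.0"

pipe = StableDiffusionXLPipeline.from_pretrained(
    BASE_MODEL,
    torch_dtype=torch.float16,  # assumed; these kwargs are truncated in the diff
    add_watermarker=False,
)
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

# LoRA setup exactly as committed.
pipe.load_lora_weights(
    "ehristoforu/dalle-3-xl-v2",
    weight_name="dalle-3-xl-lora-v2.safetensors",
    adapter_name="dalle",
)
pipe.set_adapters("dalle")
pipe.to("cuda")

# The same call the rewritten generate() makes: steps pinned to 25, one image
# per prompt, LoRA contribution scaled to 0.65 via cross_attention_kwargs.
image = pipe(
    prompt="a cute corgi wearing sunglasses, studio photo",  # example prompt, not from the diff
    negative_prompt="",
    width=1024,
    height=1024,
    guidance_scale=6,
    num_inference_steps=25,
    num_images_per_prompt=1,
    cross_attention_kwargs={"scale": 0.65},
    output_type="pil",
).images[0]
image.save("sample.png")

Note that the commit removes the num_inference_steps slider and the per-call torch.Generator plumbing: the step count is now hard-coded at 25 inside generate(), which the sketch mirrors.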