ehristoforu committed on
Commit
dc49fcd
1 Parent(s): c4af7c9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +58 -14
app.py CHANGED
@@ -9,7 +9,7 @@ import numpy as np
9
  from PIL import Image
10
  import spaces
11
  import torch
12
- from diffusers import StableDiffusionPipeline, StableDiffusionInpaintPipeline, StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
13
  from diffusers.utils import load_image
14
 
15
  DESCRIPTION = """
@@ -52,6 +52,13 @@ if torch.cuda.is_available():
52
  pipe_epic.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe_epic.scheduler.config)
53
  pipe_epic.to(device)
54
 
 
 
 
 
 
 
 
55
  pipe_inpaint = StableDiffusionInpaintPipeline.from_pretrained(
56
  "fluently/Fluently-v4-inpainting",
57
  torch_dtype=torch.float16,
@@ -61,12 +68,20 @@ if torch.cuda.is_available():
61
  pipe_inpaint.to(device)
62
 
63
  pipe_xl = StableDiffusionXLPipeline.from_pretrained(
64
- "fluently/Fluently-XL-v2",
65
  torch_dtype=torch.float16,
66
  use_safetensors=True,
67
  )
68
  pipe_xl.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe_xl.scheduler.config)
69
  pipe_xl.to(device)
 
 
 
 
 
 
 
 
70
 
71
  print("Loaded on Device!")
72
 
@@ -84,10 +99,12 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
84
  return seed
85
 
86
def get_model(model):
    """Toggle the five inpainting-specific widgets for the selected model.

    Returns five gr.update objects whose ``visible`` flag is True only
    when the inpainting model is selected, False for every other model.
    """
    is_inpaint = model == "Fluently v4 inpaint"
    # One flag, five identical updates — replaces the duplicated branches.
    return tuple(gr.update(visible=is_inpaint) for _ in range(5))
91
 
92
 
93
  @spaces.GPU(enable_queue=True)
@@ -146,7 +163,7 @@ def generate(
146
  num_images_per_prompt=1,
147
  output_type="pil",
148
  ).images
149
- elif model == "Fluently XL v2":
150
  images = pipe_xl(
151
  prompt=prompt,
152
  negative_prompt=negative_prompt,
@@ -157,7 +174,18 @@ def generate(
157
  num_images_per_prompt=1,
158
  output_type="pil",
159
  ).images
160
- else:
 
 
 
 
 
 
 
 
 
 
 
161
  blurred_mask = pipe_inpaint.mask_processor.blur(mask_image, blur_factor=blur_factor)
162
  images = pipe_inpaint(
163
  prompt=prompt,
@@ -172,6 +200,21 @@ def generate(
172
  num_images_per_prompt=1,
173
  output_type="pil",
174
  ).images
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
175
 
176
  image_paths = [save_image(img) for img in images]
177
  print(image_paths)
@@ -209,8 +252,8 @@ with gr.Blocks(title="Fluently Playground", css=css) as demo:
209
  with gr.Row():
210
  model = gr.Radio(
211
  label="Model",
212
- choices=["Fluently XL v2", "Fluently v4", "Fluently Anime", "Fluently Epic", "Fluently v4 inpaint"],
213
- value="Fluently v4",
214
  interactive=True,
215
  )
216
 
@@ -235,14 +278,14 @@ with gr.Blocks(title="Fluently Playground", css=css) as demo:
235
  run_button = gr.Button("Run", scale=0)
236
  result = gr.Gallery(label="Result", columns=1, preview=True, show_label=False)
237
  with gr.Accordion("Advanced options", open=False):
238
- use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
239
  negative_prompt = gr.Text(
240
  label="Negative prompt",
241
- max_lines=5,
242
- lines=4,
243
  value="""(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation""",
244
  placeholder="Enter a negative prompt",
245
- visible=False,
246
  )
247
  seed = gr.Slider(
248
  label="Seed",
@@ -275,6 +318,7 @@ with gr.Blocks(title="Fluently Playground", css=css) as demo:
275
  maximum=20.0,
276
  step=0.1,
277
  value=5.5,
 
278
  )
279
 
280
  gr.Examples(
@@ -295,7 +339,7 @@ with gr.Blocks(title="Fluently Playground", css=css) as demo:
295
  model.change(
296
  fn=get_model,
297
  inputs=model,
298
- outputs=[md_mask, inpaint_image, mask_image, blur_factor, strength],
299
  api_name=False,
300
  )
301
 
 
9
  from PIL import Image
10
  import spaces
11
  import torch
12
+ from diffusers import StableDiffusionPipeline, StableDiffusionInpaintPipeline, StableDiffusionXLPipeline, StableDiffusionXLInpaintPipeline, EulerAncestralDiscreteScheduler, DPMSolverSinglestepScheduler
13
  from diffusers.utils import load_image
14
 
15
  DESCRIPTION = """
 
52
  pipe_epic.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe_epic.scheduler.config)
53
  pipe_epic.to(device)
54
 
55
+ pipe_xl_inpaint = StableDiffusionXLInpaintPipeline.from_single_file(
56
+ "https://huggingface.co/fluently/Fluently-XL-v3-inpainting/blob/main/FluentlyXL-v3-inpainting.safetensors",
57
+ torch_dtype=torch.float16,
58
+ use_safetensors=True,
59
+ )
60
+ pipe_xl_inpaint.to(device)
61
+
62
  pipe_inpaint = StableDiffusionInpaintPipeline.from_pretrained(
63
  "fluently/Fluently-v4-inpainting",
64
  torch_dtype=torch.float16,
 
68
  pipe_inpaint.to(device)
69
 
70
  pipe_xl = StableDiffusionXLPipeline.from_pretrained(
71
+ "fluently/Fluently-XL-v3",
72
  torch_dtype=torch.float16,
73
  use_safetensors=True,
74
  )
75
  pipe_xl.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe_xl.scheduler.config)
76
  pipe_xl.to(device)
77
+
78
+ pipe_xl_lightning = StableDiffusionXLPipeline.from_pretrained(
79
+ "fluently/Fluently-XL-v3-lightning",
80
+ torch_dtype=torch.float16,
81
+ use_safetensors=True,
82
+ )
83
+ pipe_xl_lightning.scheduler = DPMSolverSinglestepScheduler.from_config(pipe_xl_lightning.scheduler.config, use_karras_sigmas=False, timestep_spacing="trailing")
84
+ pipe_xl_lightning.to(device)
85
 
86
  print("Loaded on Device!")
87
 
 
99
  return seed
100
 
101
def get_model(model):
    """Adjust UI widgets when the model radio selection changes.

    Returns six gr.update objects: visibility for the five
    inpainting-only widgets (mask markdown, inpaint image, mask image,
    blur factor, strength), then interactivity of the guidance-scale
    slider — locked for the Lightning model, which runs with a fixed
    guidance scale.
    """
    # Membership test replaces the duplicated `== ... or == ...` chain.
    show_inpaint = model in ("Fluently v4 inpaint", "Fluently XL v3 inpaint")
    # Original used a bare `if` followed by `if/else` (dangling-else
    # shape) with three near-identical 6-tuples; compute the two flags
    # once and build a single return value instead.
    lock_guidance = model == "Fluently XL v3 Lightning"
    return (
        *(gr.update(visible=show_inpaint) for _ in range(5)),
        gr.update(interactive=not lock_guidance),
    )
108
 
109
 
110
  @spaces.GPU(enable_queue=True)
 
163
  num_images_per_prompt=1,
164
  output_type="pil",
165
  ).images
166
+ elif model == "Fluently XL v3":
167
  images = pipe_xl(
168
  prompt=prompt,
169
  negative_prompt=negative_prompt,
 
174
  num_images_per_prompt=1,
175
  output_type="pil",
176
  ).images
177
+ elif model == "Fluently XL v3 Lightning":
178
+ images = pipe_xl_lightning(
179
+ prompt=prompt,
180
+ negative_prompt=negative_prompt,
181
+ width=width,
182
+ height=height,
183
+ guidance_scale=2,
184
+ num_inference_steps=5,
185
+ num_images_per_prompt=1,
186
+ output_type="pil",
187
+ ).images
188
+ elif model == "Fluently v4 inpaint":
189
  blurred_mask = pipe_inpaint.mask_processor.blur(mask_image, blur_factor=blur_factor)
190
  images = pipe_inpaint(
191
  prompt=prompt,
 
200
  num_images_per_prompt=1,
201
  output_type="pil",
202
  ).images
203
+ else:
204
+ blurred_mask = pipe_inpaint.mask_processor.blur(mask_image, blur_factor=blur_factor)
205
+ images = pipe_xl_inpaint(
206
+ prompt=prompt,
207
+ image=inpaint_image,
208
+ mask_image=blurred_mask,
209
+ negative_prompt=negative_prompt,
210
+ width=width,
211
+ height=height,
212
+ guidance_scale=guidance_scale,
213
+ num_inference_steps=25,
214
+ strength=strength,
215
+ num_images_per_prompt=1,
216
+ output_type="pil",
217
+ ).images
218
 
219
  image_paths = [save_image(img) for img in images]
220
  print(image_paths)
 
252
  with gr.Row():
253
  model = gr.Radio(
254
  label="Model",
255
+ choices=["Fluently XL v3 Lightning","Fluently XL v3", "Fluently v4", "Fluently Anime", "Fluently Epic", "Fluently XL v3 inpaint", "Fluently v4 inpaint"],
256
+ value="Fluently XL v3 Lightning",
257
  interactive=True,
258
  )
259
 
 
278
  run_button = gr.Button("Run", scale=0)
279
  result = gr.Gallery(label="Result", columns=1, preview=True, show_label=False)
280
  with gr.Accordion("Advanced options", open=False):
281
+ use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
282
  negative_prompt = gr.Text(
283
  label="Negative prompt",
284
+ max_lines=6,
285
+ lines=5,
286
  value="""(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation""",
287
  placeholder="Enter a negative prompt",
288
+ visible=True,
289
  )
290
  seed = gr.Slider(
291
  label="Seed",
 
318
  maximum=20.0,
319
  step=0.1,
320
  value=5.5,
321
+ interactive=False,
322
  )
323
 
324
  gr.Examples(
 
339
  model.change(
340
  fn=get_model,
341
  inputs=model,
342
+ outputs=[md_mask, inpaint_image, mask_image, blur_factor, strength, guidance_scale],
343
  api_name=False,
344
  )
345