RohitGandikota committed · Commit 6cca5dc · Parent(s): 71c541b

pushing sdxl turbo inference
app.py CHANGED
```diff
@@ -2,6 +2,15 @@ import gradio as gr
 import torch
 import os
 from utils import call
+from diffusers import (
+    DDPMScheduler,
+    DDIMScheduler,
+    PNDMScheduler,
+    LMSDiscreteScheduler,
+    EulerAncestralDiscreteScheduler,
+    EulerDiscreteScheduler,
+    DPMSolverMultistepScheduler,
+)
 from diffusers.pipelines import StableDiffusionXLPipeline
 StableDiffusionXLPipeline.__call__ = call
 import os
@@ -57,7 +66,9 @@
         self.generating = False
         self.device = 'cuda'
         self.weight_dtype = torch.bfloat16
-
+        model_id = "stabilityai/sdxl-turbo"
+        euler_anc = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
+        self.pipe = StableDiffusionXLPipeline.from_pretrained(model_id, scheduler=euler_anc, torch_dtype=self.weight_dtype).to(self.device)
         self.pipe.enable_xformers_memory_efficient_attention()
         with gr.Blocks() as demo:
             self.layout()
```
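Pulled out of the Demo class, the new initialization amounts to the standalone sketch below. This is an illustration, not the repo's exact code: it assumes a CUDA machine with diffusers and xformers installed, the content of the single removed line (the previous pipeline setup) is not shown in this view, and of the seven schedulers imported above only EulerAncestralDiscreteScheduler is actually used.

```python
import torch
from diffusers import EulerAncestralDiscreteScheduler
from diffusers.pipelines import StableDiffusionXLPipeline

model_id = "stabilityai/sdxl-turbo"

# Scheduler shipped with the SDXL-Turbo checkpoint.
scheduler = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")

# Load the pipeline in bfloat16 and move it to the GPU, as the commit does.
pipe = StableDiffusionXLPipeline.from_pretrained(
    model_id,
    scheduler=scheduler,
    torch_dtype=torch.bfloat16,
).to("cuda")

# Memory-efficient attention (requires the xformers package).
pipe.enable_xformers_memory_efficient_attention()
```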
```diff
@@ -78,7 +89,7 @@
 
         with gr.Row():
 
-            self.explain_infr = gr.Markdown(value='This is a demo of [Concept Sliders: LoRA Adaptors for Precise Control in Diffusion Models](https://sliders.baulab.info/). To try out a model that can control a particular concept, select a model and enter any prompt, choose a seed, and finally choose the SDEdit timestep for structural preservation. Higher SDEdit timesteps results in more structural change. For example, if you select the model "Surprised Look" you can generate images for the prompt "A picture of a person, realistic, 8k" and compare the slider effect to the image generated by original model. We have also provided several other pre-fine-tuned models like "repair" sliders to repair flaws in SDXL generated images (Check out the "Pretrained Sliders" drop-down). You can also train and run your own custom sliders. Check out the "train" section for custom concept slider training.')
+            self.explain_infr = gr.Markdown(value='This is a demo of [Concept Sliders: LoRA Adaptors for Precise Control in Diffusion Models](https://sliders.baulab.info/). To try out a model that can control a particular concept, select a model and enter any prompt, choose a seed, and finally choose the SDEdit timestep for structural preservation. Higher SDEdit timesteps results in more structural change. For example, if you select the model "Surprised Look" you can generate images for the prompt "A picture of a person, realistic, 8k" and compare the slider effect to the image generated by original model. We have also provided several other pre-fine-tuned models like "repair" sliders to repair flaws in SDXL generated images (Check out the "Pretrained Sliders" drop-down). You can also train and run your own custom sliders. Check out the "train" section for custom concept slider training. <b>Current Inference is running on SDXL Turbo!</b>')
 
         with gr.Row():
 
@@ -316,10 +327,10 @@
 
 
         generator = torch.manual_seed(seed)
-        edited_image = self.pipe(prompt, num_images_per_prompt=1, num_inference_steps=
+        edited_image = self.pipe(prompt, num_images_per_prompt=1, num_inference_steps=3, generator=generator, network=network, start_noise=int(start_noise), scale=float(scale), unet=unet, guidance_scale=1).images[0]
 
         generator = torch.manual_seed(seed)
-        original_image = self.pipe(prompt, num_images_per_prompt=1, num_inference_steps=
+        original_image = self.pipe(prompt, num_images_per_prompt=1, num_inference_steps=3, generator=generator, network=network, start_noise=start_noise, scale=0, unet=unet, guidance_scale=1).images[0]
 
         del unet, network
         unet = None
```
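The paired generation in the last hunk (whose removed lines are truncated in this view) reduces to the sketch below. The network=, start_noise=, scale=, and unet= keywords exist only because the commit monkey-patches StableDiffusionXLPipeline.__call__ with the custom call from utils, so they are omitted here; pipe is the Turbo pipeline from the previous sketch, and the prompt and seed are hypothetical stand-ins for the Gradio inputs.

```python
import torch

prompt = "A picture of a person, realistic, 8k"  # example prompt quoted in the demo text
seed = 42                                        # hypothetical; the demo reads the seed from the UI

# Slider-edited image (the real demo additionally passes network/start_noise/
# scale/unet kwargs, which are handled by the patched __call__ from utils).
generator = torch.manual_seed(seed)
edited_image = pipe(
    prompt,
    num_images_per_prompt=1,
    num_inference_steps=3,  # SDXL Turbo needs only a few denoising steps
    guidance_scale=1,       # values <= 1 disable classifier-free guidance in diffusers
    generator=generator,
).images[0]

# Baseline image: re-seeded so both runs start from the same initial noise.
generator = torch.manual_seed(seed)
original_image = pipe(
    prompt,
    num_images_per_prompt=1,
    num_inference_steps=3,
    guidance_scale=1,
    generator=generator,
).images[0]
```

Re-seeding torch.manual_seed before each call is what makes the comparison apples-to-apples: both runs draw identical latents, so in the demo the only difference between the two images is the slider scale (scale=0 reproduces the unedited model).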