Update app.py
amazonaws-la committed • fabbd5e
Parent(s): 7014106
app.py CHANGED
@@ -10,9 +10,10 @@ import numpy as np
 import PIL.Image
 import spaces
 import torch
-import diffusers
 from diffusers import AutoencoderKL, DiffusionPipeline, DDIMScheduler
 
+pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
+
 DESCRIPTION = "# SDXL"
 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
@@ -27,15 +28,6 @@ ENABLE_USE_LORA = os.getenv("ENABLE_USE_LORA", "1") == "1"
 ENABLE_USE_VAE = os.getenv("ENABLE_USE_VAE", "1") == "1"
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-schedulers = [
-    ("LMSDiscreteScheduler", diffusers.schedulers.scheduling_lms_discrete.LMSDiscreteScheduler),
-    ("DDIMScheduler", diffusers.schedulers.scheduling_ddim.DDIMScheduler),
-    ("DPMSolverMultistepScheduler", diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler),
-    ("EulerDiscreteScheduler", diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler),
-    ("PNDMScheduler", diffusers.schedulers.scheduling_pndm.PNDMScheduler),
-    ("DDPMScheduler", diffusers.schedulers.scheduling_ddpm.DDPMScheduler),
-    ("EulerAncestralDiscreteScheduler", diffusers.schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteScheduler)
-]
 models = ["cagliostrolab/animagine-xl-3.0"]  # Replace this with the actual value of the selected model
 
 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
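The net effect of the commit is to drop the selectable `schedulers` list (and the `import diffusers` it relied on) and pin the pipeline to `EulerDiscreteScheduler`. As placed in the diff, the new line runs before any `pipe` object exists and `EulerDiscreteScheduler` is not among the names imported from diffusers. Below is a minimal sketch (not part of the commit) of how this scheduler swap is typically wired up, assuming the pipeline is loaded first from the model id in `models`:

# Minimal sketch, not part of the commit: swap the scheduler on a loaded
# diffusers pipeline. Assumes the model id from the `models` list above.
import torch
from diffusers import DiffusionPipeline, EulerDiscreteScheduler

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Load the SDXL-style checkpoint referenced in the diff.
pipe = DiffusionPipeline.from_pretrained(
    "cagliostrolab/animagine-xl-3.0",
    torch_dtype=torch.float16 if device.type == "cuda" else torch.float32,
)

# Replace the default scheduler with EulerDiscreteScheduler, reusing the
# existing scheduler's configuration, as the added diff line does.
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(device)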