Update app.py
app.py CHANGED

@@ -3,11 +3,14 @@ import gradio as gr
 import torch
 from PIL import Image
 import utils
+import datetime
+import time
 
+start_time = time.time()
 is_colab = utils.is_google_colab()
 
 class Model:
-    def __init__(self, name, path, prefix):
+    def __init__(self, name, path="", prefix=""):
         self.name = name
         self.path = path
         self.prefix = prefix
@@ -17,17 +20,18 @@ class Model:
 models = [
     Model("Arcane", "nitrosocke/Arcane-Diffusion", "arcane style "),
     Model("Archer", "nitrosocke/archer-diffusion", "archer style "),
-    Model("Elden Ring", "nitrosocke/elden-ring-diffusion", "elden ring style "),
-    Model("Spider-Verse", "nitrosocke/spider-verse-diffusion", "spiderverse style "),
     Model("Modern Disney", "nitrosocke/mo-di-diffusion", "modern disney style "),
     Model("Classic Disney", "nitrosocke/classic-anim-diffusion", "classic disney style "),
     Model("Loving Vincent (Van Gogh)", "dallinmackay/Van-Gogh-diffusion", "lvngvncnt "),
     Model("Redshift renderer (Cinema4D)", "nitrosocke/redshift-diffusion", "redshift style "),
     Model("Midjourney v4 style", "prompthero/midjourney-v4-diffusion", "mdjrny-v4 style "),
-    Model("Waifu", "hakurei/waifu-diffusion"
+    Model("Waifu", "hakurei/waifu-diffusion"),
     Model("Cyberpunk Anime", "DGSpitzer/Cyberpunk-Anime-Diffusion", "dgs illustration style "),
-    Model("
+    Model("TrinArt v2", "naclbit/trinart_stable_diffusion_v2")
 ]
+    # Model("Spider-Verse", "nitrosocke/spider-verse-diffusion", "spiderverse style "),
+    # Model("Balloon Art", "Fictiverse/Stable_Diffusion_BalloonArt_Model", "BalloonArt "),
+    # Model("Elden Ring", "nitrosocke/elden-ring-diffusion", "elden ring style "),
     # Model("Tron Legacy", "dallinmackay/Tron-Legacy-diffusion", "trnlgcy ")
     #Model("Pokémon", "lambdalabs/sd-pokemon-diffusers", ""),
     #Model("Pony Diffusion", "AstraliteHeart/pony-diffusion", ""),
@@ -48,7 +52,7 @@ scheduler = DPMSolverMultistepScheduler(
 
 custom_model = None
 if is_colab:
-    models.insert(0, Model("Custom model"
+    models.insert(0, Model("Custom model"))
     custom_model = models[0]
 
 last_mode = "txt2img"
@@ -59,15 +63,16 @@ if is_colab:
     pipe = StableDiffusionPipeline.from_pretrained(current_model.path, torch_dtype=torch.float16, scheduler=scheduler, safety_checker=lambda images, clip_input: (images, False))
 
 else: # download all models
+    print(f"{datetime.datetime.now()} Downloading vae...")
     vae = AutoencoderKL.from_pretrained(current_model.path, subfolder="vae", torch_dtype=torch.float16)
     for model in models:
         try:
-            print(f"Downloading {model.name}
+            print(f"{datetime.datetime.now()} Downloading {model.name}...")
             unet = UNet2DConditionModel.from_pretrained(model.path, subfolder="unet", torch_dtype=torch.float16)
             model.pipe_t2i = StableDiffusionPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
             model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
         except Exception as e:
-            print("Failed to load model " + model.name + ": " + str(e))
+            print(f"{datetime.datetime.now()} Failed to load model " + model.name + ": " + str(e))
             models.remove(model)
     pipe = models[0].pipe_t2i
 
@@ -269,3 +274,5 @@ with gr.Blocks(css=css) as demo:
 if not is_colab:
     demo.queue(concurrency_count=1)
 demo.launch(debug=is_colab, share=is_colab)
+
+print(f"Space built in {time.time() - start_time:.2f} seconds")
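For reference, the loading pattern this commit extends (one shared fp16 VAE, a per-model UNet, timestamped progress prints, and a start-up timer) can be sketched standalone as below. This is a minimal sketch, not the Space's full app.py: the model names and paths are taken from the diff above, while the trimmed model list, the omitted scheduler, safety checker, and Gradio UI are simplifications assumed here for brevity.

# Sketch of the shared-VAE, per-model-UNet loading used in the diff, with
# timestamped logging and an overall build timer. Assumes network access to
# the Hugging Face Hub and the diffusers/torch packages.
import datetime
import time

import torch
from diffusers import (AutoencoderKL, StableDiffusionImg2ImgPipeline,
                       StableDiffusionPipeline, UNet2DConditionModel)

start_time = time.time()


class Model:
    # path/prefix default to "", so placeholder entries like Model("Custom model") work.
    def __init__(self, name, path="", prefix=""):
        self.name = name
        self.path = path
        self.prefix = prefix
        self.pipe_t2i = None
        self.pipe_i2i = None


models = [
    Model("Arcane", "nitrosocke/Arcane-Diffusion", "arcane style "),
    Model("Waifu", "hakurei/waifu-diffusion"),
]

# Download the VAE once and reuse it for every model.
print(f"{datetime.datetime.now()} Downloading vae...")
vae = AutoencoderKL.from_pretrained(models[0].path, subfolder="vae", torch_dtype=torch.float16)

for model in list(models):  # iterate over a copy so failed entries can be removed safely
    try:
        print(f"{datetime.datetime.now()} Downloading {model.name}...")
        unet = UNet2DConditionModel.from_pretrained(model.path, subfolder="unet", torch_dtype=torch.float16)
        model.pipe_t2i = StableDiffusionPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16)
        model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16)
    except Exception as e:
        print(f"{datetime.datetime.now()} Failed to load model {model.name}: {e}")
        models.remove(model)

print(f"Space built in {time.time() - start_time:.2f} seconds")

One deliberate difference in the sketch: the loop iterates over a copy of models (list(models)), because removing an entry from the list being iterated, as the diff's except branch does, can silently skip the model that follows a failed download.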