Update

README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🏢
 colorFrom: blue
 colorTo: pink
 sdk: gradio
-sdk_version:
+sdk_version: 4.36.1
 app_file: app.py
 pinned: false
 suggested_hardware: t4-small
app.py CHANGED
@@ -3,11 +3,23 @@
 from __future__ import annotations
 
 import os
+import shlex
+import subprocess
+import sys
 
 import gradio as gr
+import PIL.Image
+import spaces
 import torch
+from diffusers import DPMSolverMultistepScheduler
 
-
+if os.getenv("SYSTEM") == "spaces":
+    with open("patch") as f:
+        subprocess.run(shlex.split("patch -p1"), cwd="multires_textual_inversion", stdin=f)
+
+sys.path.insert(0, "multires_textual_inversion")
+
+from pipeline import MultiResPipeline, load_learned_concepts
 
 DESCRIPTION = "# [Multiresolution Textual Inversion](https://github.com/giannisdaras/multires_textual_inversion)"
 
@@ -20,9 +32,39 @@ For this demo, only `<jane>`, `<gta5-artwork>` and `<cat-toy>` are available.
 Also, `number` should be an integer in [0, 9].
 """
 
-
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+model_id = "runwayml/stable-diffusion-v1-5"
+if device.type == "cpu":
+    pipe = MultiResPipeline.from_pretrained(model_id)
+else:
+    pipe = MultiResPipeline.from_pretrained(model_id, torch_dtype=torch.float16, revision="fp16")
+pipe = pipe.to(device)
+pipe.scheduler = DPMSolverMultistepScheduler(
+    beta_start=0.00085,
+    beta_end=0.012,
+    beta_schedule="scaled_linear",
+    num_train_timesteps=1000,
+    trained_betas=None,
+    prediction_type="epsilon",
+    thresholding=False,
+    algorithm_type="dpmsolver++",
+    solver_type="midpoint",
+    lower_order_final=True,
+)
+string_to_param_dict = load_learned_concepts(pipe, "textual_inversion_outputs/")
+
+
+@spaces.GPU
+def run(prompt: str, n_images: int, n_steps: int, seed: int) -> list[PIL.Image.Image]:
+    generator = torch.Generator(device=device).manual_seed(seed)
+    return pipe(
+        [prompt] * n_images,
+        string_to_param_dict,
+        num_inference_steps=n_steps,
+        generator=generator,
+    )
 
-model = Model()
 
 with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
@@ -40,18 +82,24 @@ with gr.Blocks(css="style.css") as demo:
                         value=1,
                     )
                 with gr.Row():
-                    num_steps = gr.Slider(
+                    num_steps = gr.Slider(
+                        label="Number of inference steps",
+                        minimum=1,
+                        maximum=50,
+                        step=1,
+                        value=10,
+                    )
                 with gr.Row():
                     seed = gr.Slider(label="Seed", minimum=0, maximum=100000, step=1, value=100)
                 with gr.Row():
-                    run_button = gr.Button(
+                    run_button = gr.Button()
 
         with gr.Column():
             result = gr.Gallery(label="Result", object_fit="scale-down")
 
     with gr.Row():
         with gr.Group():
-            fn = lambda x:
+            fn = lambda x: run(x, 2, 10, 100)
             with gr.Row():
                 gr.Examples(
                     label="Examples 1",
@@ -64,7 +112,6 @@ with gr.Blocks(css="style.css") as demo:
                     inputs=prompt,
                     outputs=result,
                     fn=fn,
-                    cache_examples=CACHE_EXAMPLES,
                 )
             with gr.Row():
                 gr.Examples(
@@ -78,7 +125,6 @@ with gr.Blocks(css="style.css") as demo:
                     inputs=prompt,
                     outputs=result,
                     fn=fn,
-                    cache_examples=CACHE_EXAMPLES,
                 )
             with gr.Row():
                 gr.Examples(
@@ -91,7 +137,6 @@ with gr.Blocks(css="style.css") as demo:
                    inputs=prompt,
                    outputs=result,
                    fn=fn,
-                    cache_examples=CACHE_EXAMPLES,
                )
 
    inputs = [
@@ -101,13 +146,13 @@ with gr.Blocks(css="style.css") as demo:
        seed,
    ]
    prompt.submit(
-        fn=
+        fn=run,
        inputs=inputs,
        outputs=result,
        api_name=False,
    )
    run_button.click(
-        fn=
+        fn=run,
        inputs=inputs,
        outputs=result,
        api_name="run",
@@ -116,4 +161,5 @@ with gr.Blocks(css="style.css") as demo:
    with gr.Accordion("About available prompts", open=False):
        gr.Markdown(DETAILS)
 
-
+if __name__ == "__main__":
+    demo.queue(max_size=20).launch()
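The refactor above replaces the lazily constructed `Model` class with module-level setup plus a `@spaces.GPU`-decorated `run` function, which is the pattern ZeroGPU Spaces expect: the process starts without a GPU, and one is attached only while a decorated function executes. A minimal sketch of that pattern, assuming a ZeroGPU environment (where the `spaces` package is preinstalled) and a generic diffusers pipeline rather than this Space's `MultiResPipeline`:

# Illustrative ZeroGPU sketch; model id and UI are placeholders,
# not the Space's actual MultiResPipeline setup.
import gradio as gr
import spaces
import torch
from diffusers import DiffusionPipeline

# Heavy objects are built once at import time. On ZeroGPU, .to("cuda")
# is fine here because CUDA initialization is deferred by the spaces
# package until a GPU is actually attached.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")


@spaces.GPU  # a GPU is leased only for the duration of this call
def generate(prompt: str):
    return pipe(prompt, num_inference_steps=10).images[0]


demo = gr.Interface(fn=generate, inputs="text", outputs="image")

if __name__ == "__main__":
    demo.queue(max_size=20).launch()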
model.py DELETED
@@ -1,48 +0,0 @@
-from __future__ import annotations
-
-import os
-import shlex
-import subprocess
-import sys
-
-import PIL.Image
-import torch
-from diffusers import DPMSolverMultistepScheduler
-
-if os.getenv("SYSTEM") == "spaces":
-    with open("patch") as f:
-        subprocess.run(shlex.split("patch -p1"), cwd="multires_textual_inversion", stdin=f)
-
-sys.path.insert(0, "multires_textual_inversion")
-
-from pipeline import MultiResPipeline, load_learned_concepts
-
-
-class Model:
-    def __init__(self):
-        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-        model_id = "runwayml/stable-diffusion-v1-5"
-        if self.device.type == "cpu":
-            pipe = MultiResPipeline.from_pretrained(model_id)
-        else:
-            pipe = MultiResPipeline.from_pretrained(model_id, torch_dtype=torch.float16, revision="fp16")
-        self.pipe = pipe.to(self.device)
-        self.pipe.scheduler = DPMSolverMultistepScheduler(
-            beta_start=0.00085,
-            beta_end=0.012,
-            beta_schedule="scaled_linear",
-            num_train_timesteps=1000,
-            trained_betas=None,
-            predict_epsilon=True,
-            thresholding=False,
-            algorithm_type="dpmsolver++",
-            solver_type="midpoint",
-            lower_order_final=True,
-        )
-        self.string_to_param_dict = load_learned_concepts(self.pipe, "textual_inversion_outputs/")
-
-    def run(self, prompt: str, n_images: int, n_steps: int, seed: int) -> list[PIL.Image.Image]:
-        generator = torch.Generator(device=self.device).manual_seed(seed)
-        return self.pipe(
-            [prompt] * n_images, self.string_to_param_dict, num_inference_steps=n_steps, generator=generator
-        )
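Note that, besides inlining this class into app.py, the update also replaces the deprecated `predict_epsilon=True` flag with `prediction_type="epsilon"`, the name current diffusers releases use. A hedged sketch of the more common idiom, which rebuilds the solver from a loaded pipeline's own scheduler config instead of restating the beta schedule by hand (the pipeline id is illustrative, and whether `MultiResPipeline` exposes the same config is an assumption):

# Sketch only: derive the scheduler from the checkpoint's config rather
# than copying beta_start/beta_end/num_train_timesteps manually.
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
# from_config keeps the trained betas and prediction_type="epsilon" from
# the checkpoint and overrides only the solver-specific settings.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config,
    algorithm_type="dpmsolver++",
    solver_type="midpoint",
)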
requirements.txt CHANGED
@@ -1,6 +1,6 @@
-accelerate==0.
-diffusers==0.
-ftfy==6.
-Pillow==10.
-torch==
-transformers==4.
+accelerate==0.31.0
+diffusers==0.28.2
+ftfy==6.2.0
+Pillow==10.3.0
+torch==2.0.1
+transformers==4.41.2
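Every dependency is pinned to an exact version, so the Space rebuilds reproducibly. A quick, illustrative check that a running environment actually matches the new pins:

# Illustrative sanity check against the pins above.
import importlib.metadata

pins = {
    "accelerate": "0.31.0",
    "diffusers": "0.28.2",
    "transformers": "4.41.2",
}
for pkg, want in pins.items():
    have = importlib.metadata.version(pkg)
    assert have == want, f"{pkg}: expected {want}, found {have}"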