陆鹿 committed
Commit • 1906c3c
1 Parent(s): 41b81e6

:bug: remove useless arg
app.py CHANGED
@@ -7,7 +7,7 @@ import pipeline_openvino_stable_diffusion
 
 model_id = 'OFA-Sys/small-stable-diffusion-v0'
 prefix = ''
-
+
 scheduler = DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
 
 
@@ -22,10 +22,9 @@ def error_str(error, title="Error"):
   return f"""#### {title}
             {error}""" if error else ""
 
-def inference(prompt, guidance, steps, width=512, height=512, seed=0, neg_prompt=""
+def inference(prompt, guidance, steps, width=512, height=512, seed=0, neg_prompt=""):
 
   generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
-  prompt = f"{prefix} {prompt}" if auto_prefix else prompt
 
   try:
     return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None
@@ -42,7 +41,7 @@ def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator):
       width = width,
       height = height,
       generator = generator)
-
+
     return result.images[0]
 
 
@@ -60,12 +59,12 @@ with gr.Blocks(css=css) as demo:
 
           </p>
           Running on CPUs with <a href="https://github.com/OFA-Sys/diffusion-deploy">diffusion-deploy</a> to speedup the inference.
-
+
         </div>
     """
   )
   with gr.Row():
-
+
     with gr.Column(scale=55):
       with gr.Group():
        with gr.Row():
@@ -79,7 +78,7 @@ with gr.Blocks(css=css) as demo:
      with gr.Tab("Options"):
        with gr.Group():
          neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
-
+
 
          with gr.Row():
            guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
@@ -92,9 +91,9 @@ with gr.Blocks(css=css) as demo:
          seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
 
 
-
 
-
+
+  inputs = [prompt, guidance, steps, width, height, seed, neg_prompt]
   outputs = [image_out, error_output]
   prompt.submit(inference, inputs=inputs, outputs=outputs)
   generate.click(inference, inputs=inputs, outputs=outputs)
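For context, the bug this commit addresses is an argument mismatch between the Gradio event wiring and the handler: prompt.submit and generate.click pass the current values of the components listed in inputs to inference positionally, so the inputs list and the function signature must line up one-for-one. Judging from the deleted prompt = f"{prefix} {prompt}" if auto_prefix else prompt line, the removed "useless arg" appears to be auto_prefix; the old contents of a few changed lines (for example the previous inputs list) are not captured in the diff view above. Below is a minimal, self-contained sketch of the wiring after the change. The component definitions and the placeholder inference body are illustrative assumptions, not the Space's actual code, which builds a torch generator and returns the image produced by txt_to_img.

# Minimal sketch of the event wiring this commit repairs (illustrative only; the
# component definitions and the inference body are assumptions, not the Space's app.py).
import gradio as gr

def inference(prompt, guidance, steps, width=512, height=512, seed=0, neg_prompt=""):
    # Placeholder body; the real app builds a generator from `seed` and calls txt_to_img(...).
    summary = f"'{prompt}' | guidance={guidance}, steps={steps}, {width}x{height}, seed={seed}, neg='{neg_prompt}'"
    return summary, None  # two return values -> two output components

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    generate = gr.Button("Generate")
    neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
    guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
    steps = gr.Slider(label="Steps", value=25, maximum=75, step=1)
    width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
    height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8)
    seed = gr.Slider(0, 2147483647, label="Seed (0 = random)", value=0, step=1)
    image_out = gr.Textbox(label="Result")  # stand-in; the Space presumably renders an image here
    error_output = gr.Markdown()

    # Gradio passes the values of these components to `inference` positionally, so this
    # list must match the function's parameters one-for-one; the commit drops the leftover
    # extra entry/argument so the two stay in sync.
    inputs = [prompt, guidance, steps, width, height, seed, neg_prompt]
    outputs = [image_out, error_output]
    prompt.submit(inference, inputs=inputs, outputs=outputs)
    generate.click(inference, inputs=inputs, outputs=outputs)

if __name__ == "__main__":
    demo.launch()

In general, if the inputs list and the signature drift apart, say the list still carries an extra component after the corresponding parameter is dropped, every submit or click fails with a TypeError about too many positional arguments, so keeping the two aligned in the same commit is the fix this diff makes.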