Spaces:
Runtime error
Runtime error
Commit
•
bc7cf63
1
Parent(s):
22647d4
Update app.py
Browse files
app.py
CHANGED
@@ -5,9 +5,9 @@ from diffusers import DiffusionPipeline
|
|
5 |
# Load model and scheduler
|
6 |
ldm = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")
|
7 |
|
8 |
-
def generate_image(prompt, negative_prompt="Low quality", width=512, height=512, num_inference_steps=50, eta=0.3, guidance_scale=6):
|
9 |
# Run pipeline in inference (sample random noise and denoise)
|
10 |
-
images = ldm([prompt], num_inference_steps=
|
11 |
# Resize image to desired width and height
|
12 |
resized_images = [image.resize((width, height)) for image in images]
|
13 |
# Save images
|
@@ -18,35 +18,13 @@ def generate_image(prompt, negative_prompt="Low quality", width=512, height=512,
|
|
18 |
# Define the interface
|
19 |
iface = gr.Interface(
|
20 |
fn=generate_image,
|
21 |
-
inputs=[
|
22 |
-
gr.inputs.Textbox(label="Prompt", lines=2),
|
23 |
-
gr.inputs.Textbox(label="Negative Prompt (optional)", lines=2),
|
24 |
-
gr.inputs.Number(label="Width (optional)", default=512),
|
25 |
-
gr.inputs.Number(label="Height (optional)", default=512),
|
26 |
-
gr.inputs.Number(label="Number of Inference Steps (optional)", default=50),
|
27 |
-
gr.inputs.Number(label="Eta (optional)", default=0.3),
|
28 |
-
gr.inputs.Number(label="Guidance Scale (optional)", default=6),
|
29 |
-
],
|
30 |
outputs="text",
|
31 |
layout="vertical",
|
32 |
title="Image Generation",
|
33 |
-
description="Generate images based on prompts.",
|
34 |
-
article="
|
35 |
-
examples=[
|
36 |
-
["A painting of a squirrel eating a burger", "Low quality", 512, 512, 50, 0.3, 6],
|
37 |
-
["A breathtaking landscape with mountains", "Blurry", 800, 600, 30, 0.5, 8],
|
38 |
-
["An abstract artwork with vibrant colors", "Dull", 1024, 768, 70, 0.2, 10],
|
39 |
-
],
|
40 |
-
)
|
41 |
-
|
42 |
-
# Configure styling options
|
43 |
-
iface.configure(
|
44 |
-
label_font="Arial",
|
45 |
-
label_font_size=18,
|
46 |
-
border_width=2,
|
47 |
-
border_color="blue",
|
48 |
-
button_bg_color="lightblue",
|
49 |
-
button_text_color="black",
|
50 |
)
|
51 |
|
52 |
# Launch the interface
|
|
|
# Load model and scheduler — instantiated once at module import so every
# Gradio request below reuses the same pipeline weights.
_checkpoint = "CompVis/ldm-text2im-large-256"  # CompVis latent-diffusion text-to-image
ldm = DiffusionPipeline.from_pretrained(_checkpoint)
|
7 |
|
8 |
+
def generate_image(prompt, negative_prompt="Low quality", width=512, height=512):
|
9 |
# Run pipeline in inference (sample random noise and denoise)
|
10 |
+
images = ldm([prompt], num_inference_steps=50, eta=0.3, guidance_scale=6, negative_prompts=[negative_prompt]).images
|
11 |
# Resize image to desired width and height
|
12 |
resized_images = [image.resize((width, height)) for image in images]
|
13 |
# Save images
|
|
|
# Define the interface.
# The four input components line up positionally with generate_image's
# parameters: prompt (text), negative_prompt (text), width (number),
# height (number) — and each example row supplies one value per component.
iface = gr.Interface(
    fn=generate_image,
    inputs=["text", "text", "number", "number"],
    # NOTE(review): outputs="text" — generate_image's tail (file lines 14-17)
    # is not visible in this diff; presumably it returns a saved-file path or
    # status message. Confirm against the full app.py.
    outputs="text",
    # `layout="vertical"` removed: `layout` is not an accepted gr.Interface
    # keyword in Gradio 3+ (it is ignored/warned as an unused kwarg), so it
    # was a dead, misleading argument.
    title="Image Generation",
    description="Generate images based on prompts.",
    article="For more information, visit the documentation: [link](https://docs.gradio.app/)",
    examples=[["A painting of a squirrel eating a burger", "Low quality", 512, 512]],
)
|
29 |
|
30 |
# Launch the interface
|