Update app.py
app.py CHANGED
@@ -7,7 +7,8 @@ from diffusers import DiffusionPipeline
 import torch
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "stabilityai/sdxl-turbo"  # Replace to the model you would like to use
+#model_repo_id = "stabilityai/sdxl-turbo"  # Replace to the model you would like to use
+model_repo_id = "CompVis/stable-diffusion-v1-4"
 
 if torch.cuda.is_available():
     torch_dtype = torch.float16
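The commit swaps the hard-coded checkpoint: the sdxl-turbo id is kept as a comment and Stable Diffusion v1.4 takes its place. A minimal standalone sketch of the load path this change feeds into, assuming the stock template's from_pretrained lines just below this hunk (not shown here) are unchanged:

```python
import torch
from diffusers import DiffusionPipeline

# Mirrors the template's setup; the swapped-in checkpoint loads at startup.
device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
model_repo_id = "CompVis/stable-diffusion-v1-4"

pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
pipe = pipe.to(device)
```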
@@ -66,7 +67,7 @@ css = """
 
 with gr.Blocks(css=css) as demo:
     with gr.Column(elem_id="col-container"):
-        gr.Markdown(" # Text-to-Image")
+        gr.Markdown(" # Text-to-Image SemaSci Template")
 
         with gr.Row():
             prompt = gr.Text(
@@ -82,11 +83,19 @@ with gr.Blocks(css=css) as demo:
         result = gr.Image(label="Result", show_label=False)
 
         with gr.Accordion("Advanced Settings", open=False):
+            model_repo_id = gr.Text(
+                label="Model Id",
+                max_lines=1,
+                placeholder="Choose model",
+                visible=True,
+                container=True,
+            )
+
             negative_prompt = gr.Text(
                 label="Negative prompt",
                 max_lines=1,
                 placeholder="Enter a negative prompt",
-                visible=False,
+                visible=True,
             )
 
             seed = gr.Slider(
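Note that the new textbox reuses the name model_repo_id, shadowing the module-level string from the first hunk once the Blocks body runs; the module-level default still drives the startup load, while the textbox value only reaches inference through the event's inputs list (final hunk below). A hypothetical hardening, not part of this commit, would pre-fill the textbox so an empty submission can't reach from_pretrained:

```python
# Hypothetical tweak (not in the commit): seed the textbox with the new
# module-level default so it never submits an empty model id.
model_repo_id = gr.Text(
    label="Model Id",
    max_lines=1,
    placeholder="Choose model",
    value="CompVis/stable-diffusion-v1-4",
    visible=True,
    container=True,
)
```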
@@ -94,7 +103,7 @@ with gr.Blocks(css=css) as demo:
                 minimum=0,
                 maximum=MAX_SEED,
                 step=1,
-                value=0,
+                value=42,
             )
 
             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
@@ -122,7 +131,7 @@ with gr.Blocks(css=css) as demo:
                 minimum=0.0,
                 maximum=10.0,
                 step=0.1,
-                value=0.0,  # Replace with defaults that work for your model
+                value=7.0,  # Replace with defaults that work for your model
             )
 
             num_inference_steps = gr.Slider(
@@ -130,7 +139,7 @@ with gr.Blocks(css=css) as demo:
                 minimum=1,
                 maximum=50,
                 step=1,
-                value=2,  # Replace with defaults that work for your model
+                value=20,  # Replace with defaults that work for your model
             )
 
             gr.Examples(examples=examples, inputs=[prompt])
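The two slider defaults track the model swap: sdxl-turbo is a distilled checkpoint designed to run with very few steps and no classifier-free guidance, whereas SD 1.4 is a standard CFG model typically sampled at guidance 7-8 over 20-50 steps. A hypothetical standalone spot check of the new defaults (prompt and output path are illustrative):

```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
).to("cuda" if torch.cuda.is_available() else "cpu")

image = pipe(
    "a photograph of an astronaut riding a horse",
    guidance_scale=7.0,        # the new slider default
    num_inference_steps=20,    # the new slider default
).images[0]
image.save("sample.png")
```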
@@ -139,6 +148,7 @@ with gr.Blocks(css=css) as demo:
         fn=infer,
         inputs=[
             prompt,
+            model_repo_id,
             negative_prompt,
             seed,
             randomize_seed,
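With model_repo_id now the second entry in inputs, Gradio passes the textbox string as infer's second positional argument; none of the visible hunks touch infer itself, so its signature must have been updated to match, or the call will fail with an argument-count error. A sketch of what the updated infer presumably looks like, assuming the stock template's body; everything except the new parameter and the per-call from_pretrained is an assumption, not shown in this diff:

```python
import random

import gradio as gr
import torch
from diffusers import DiffusionPipeline

# Globals assumed from the stock template (not shown in these hunks).
device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
MAX_SEED = 2**31 - 1  # the template uses np.iinfo(np.int32).max

def infer(
    prompt,
    model_repo_id,  # new parameter, same position as in the inputs list
    negative_prompt,
    seed,
    randomize_seed,
    width,
    height,
    guidance_scale,
    num_inference_steps,
    progress=gr.Progress(track_tqdm=True),
):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)

    # Loading inside infer lets the textbox value take effect per request,
    # at the cost of re-instantiating the pipeline on every call.
    pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
    pipe = pipe.to(device)

    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]
    return image, seed
```

A per-request reload is slow for large checkpoints; caching pipelines in a dict keyed by model id would avoid repeated downloads while keeping the textbox flexible.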