patrickvonplaten committed
Commit 716f49f
1 Parent(s): 2bd5f25
Files changed (1)
  1. app.py +274 -244
app.py CHANGED
@@ -1,244 +1,216 @@
- from diffusers import AutoencoderKL, UNet2DConditionModel, StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
  import gradio as gr
  import torch
  from PIL import Image
- import utils
- import datetime
  import time
  import psutil
  import random


  start_time = time.time()
- is_colab = utils.is_google_colab()
- state = None
  current_steps = 25

  class Model:
      def __init__(self, name, path=""):
          self.name = name
          self.path = path
-         self.pipe_t2i = None
-         self.pipe_i2i = None
-
- models = [
-     Model("2.2", "darkstorm2150/Protogen_v2.2_Official_Release"),
-     Model("3.4", "darkstorm2150/Protogen_x3.4_Official_Release"),
-     Model("5.3", "darkstorm2150/Protogen_v5.3_Official_Release"),
-     Model("5.8", "darkstorm2150/Protogen_x5.8_Official_Release"),
-     Model("Dragon", "darkstorm2150/Protogen_Dragon_Official_Release"),
- ]
-
- custom_model = None
- if is_colab:
-     models.insert(0, Model("Custom model"))
-     custom_model = models[0]
-
- last_mode = "txt2img"
- current_model = models[1] if is_colab else models[0]
- current_model_path = current_model.path
-
- if is_colab:
-     pipe = StableDiffusionPipeline.from_pretrained(
-         current_model.path,
-         torch_dtype=torch.float16,
-         scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
-         safety_checker=lambda images, clip_input: (images, False)
-     )
-
- else:
-     pipe = StableDiffusionPipeline.from_pretrained(
-         current_model.path,
-         torch_dtype=torch.float16,
-         scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
-     )
-
- if torch.cuda.is_available():
-     pipe = pipe.to("cuda")
-     pipe.enable_xformers_memory_efficient_attention()
-
- device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
-
- def error_str(error, title="Error"):
-     return f"""#### {title}
- {error}""" if error else ""
-
- def update_state(new_state):
-     global state
-     state = new_state
-
- def update_state_info(old_state):
-     if state and state != old_state:
-         return gr.update(value=state)
-
- def custom_model_changed(path):
-     models[0].path = path
-     global current_model
-     current_model = models[0]
-
- def on_model_change(model_name):
-
-     prefix = "Enter prefix"
-
-     return gr.update(visible = model_name == models[0].name), gr.update(placeholder=prefix)
-
- def on_steps_change(steps):
-     global current_steps
-     current_steps = steps

- def pipe_callback(step: int, timestep: int, latents: torch.FloatTensor):
-     update_state(f"{step}/{current_steps} steps")#\nTime left, sec: {timestep/100:.0f}")
-
- def inference(model_name, prompt, guidance, steps, n_images=1, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt=""):
-
-     update_state(" ")

-     print(psutil.virtual_memory()) # print memory usage

-     global current_model
-     for model in models:
-         if model.name == model_name:
-             current_model = model
-     model_path = current_model.path

-     # generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
-     if seed == 0:
-         seed = random.randint(0, 2147483647)

-     generator = torch.Generator('cuda').manual_seed(seed)

-     try:
-         if img is not None:
-             return img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator, seed), f"Done. Seed: {seed}"
-         else:
-             return txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator, seed), f"Done. Seed: {seed}"
-     except Exception as e:
-         return None, error_str(e)

- def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator, seed):

-     print(f"{datetime.datetime.now()} txt_to_img, model: {current_model.name}")

-     global last_mode
-     global pipe
-     global current_model_path
-     if model_path != current_model_path or last_mode != "txt2img":
-         current_model_path = model_path

-         update_state(f"Loading {current_model.name} text-to-image model...")

-         if is_colab or current_model == custom_model:
-             pipe = StableDiffusionPipeline.from_pretrained(
-                 current_model_path,
-                 torch_dtype=torch.float16,
-                 scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
-                 safety_checker=lambda images, clip_input: (images, False)
-             )
          else:
-             pipe = StableDiffusionPipeline.from_pretrained(
-                 current_model_path,
-                 torch_dtype=torch.float16,
-                 scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
-             )
-         # pipe = pipe.to("cpu")
-         # pipe = current_model.pipe_t2i
-
-         if torch.cuda.is_available():
-             pipe = pipe.to("cuda")
-             pipe.enable_xformers_memory_efficient_attention()
-         last_mode = "txt2img"

      result = pipe(
-         prompt,
-         negative_prompt = neg_prompt,
-         num_images_per_prompt=n_images,
-         num_inference_steps = int(steps),
-         guidance_scale = guidance,
-         width = width,
-         height = height,
-         generator = generator,
-         callback=pipe_callback)
-
-     # update_state(f"Done. Seed: {seed}")
-
-     return replace_nsfw_images(result)
-
- def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator, seed):

-     print(f"{datetime.datetime.now()} img_to_img, model: {model_path}")

-     global last_mode
-     global pipe
-     global current_model_path
-     if model_path != current_model_path or last_mode != "img2img":
-         current_model_path = model_path

-         update_state(f"Loading {current_model.name} image-to-image model...")

-         if is_colab or current_model == custom_model:
-             pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
-                 current_model_path,
-                 torch_dtype=torch.float16,
-                 scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
-                 safety_checker=lambda images, clip_input: (images, False)
-             )
-         else:
-             pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
-                 current_model_path,
-                 torch_dtype=torch.float16,
-                 scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
-             )
-         # pipe = pipe.to("cpu")
-         # pipe = current_model.pipe_i2i
-
-         if torch.cuda.is_available():
-             pipe = pipe.to("cuda")
-             pipe.enable_xformers_memory_efficient_attention()
-         last_mode = "img2img"

      ratio = min(height / img.height, width / img.width)
      img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
      result = pipe(
          prompt,
-         negative_prompt = neg_prompt,
          num_images_per_prompt=n_images,
-         image = img,
-         num_inference_steps = int(steps),
-         strength = strength,
-         guidance_scale = guidance,
-         # width = width,
-         # height = height,
-         generator = generator,
-         callback=pipe_callback)
-
-     # update_state(f"Done. Seed: {seed}")
-
      return replace_nsfw_images(result)

- def replace_nsfw_images(results):

-     if is_colab:
-         return results.images
-
      for i in range(len(results.images)):
-         if results.nsfw_content_detected[i]:
-             results.images[i] = Image.open("nsfw.png")
      return results.images

- # css = """.finetuned-diffusion-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.finetuned-diffusion-div div h1{font-weight:900;margin-bottom:7px}.finetuned-diffusion-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
- # """
  with gr.Blocks(css="style.css") as demo:
      gr.HTML(
-         f"""
          <div class="finetuned-diffusion-div">
          <div>
          <h1>Protogen Diffusion</h1>
          </div>
          <p>
-         Demo for multiple fine-tuned Protogen Stable Diffusion models + in colab notebook you can load any other Diffusers 🧨 SD model hosted on HuggingFace 🤗.
-         </p>
-         <p>You can skip the queue and load custom models in the colab: <a href="https://colab.research.google.com/gist/qunash/42112fb104509c24fd3aa6d1c11dd6e0/copy-of-fine-tuned-diffusion-gradio.ipynb"><img data-canonical-src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab" src="https://camo.githubusercontent.com/84f0493939e0c4de4e6dbe113251b4bfb5353e57134ffd9fcab6b8714514d4d1/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667"></a></p>
-         Running on <b>{device}</b>{(" in a <b>Google Colab</b>." if is_colab else "")}
          </p>
          <p>You can also duplicate this space and upgrade to gpu by going to settings:<br>
          <a style="display:inline-block" href="https://huggingface.co/spaces/patrickvonplaten/finetuned_diffusion?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></p>
@@ -246,64 +218,124 @@ with gr.Blocks(css="style.css") as demo:
      """
      )
      with gr.Row():
-
-         with gr.Column(scale=55):
-             with gr.Group():
-                 model_name = gr.Dropdown(label="Model", choices=[m.name for m in models], value=current_model.name)
-                 with gr.Box(visible=False) as custom_model_group:
-                     custom_model_path = gr.Textbox(label="Custom model path", placeholder="Path to model, e.g. darkstorm2150/Protogen_x3.4_Official_Release", interactive=True)
-                     gr.HTML("<div><font size='2'>Custom models have to be downloaded first, so give it some time.</font></div>")
-
-             with gr.Row():
-                 prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder="Enter prompt.").style(container=False)
-                 generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))
-
-
-             # image_out = gr.Image(height=512)
-             gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[2], height="auto")
-
-             state_info = gr.Textbox(label="State", show_label=False, max_lines=2).style(container=False)
-             error_output = gr.Markdown()

-         with gr.Column(scale=45):
-             with gr.Tab("Options"):
                  with gr.Group():
-                     neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")

-                     n_images = gr.Slider(label="Images", value=1, minimum=1, maximum=4, step=1)
-
-                     with gr.Row():
-                         guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
-                         steps = gr.Slider(label="Steps", value=current_steps, minimum=2, maximum=75, step=1)
-
-                     with gr.Row():
-                         width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
-                         height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8)
-
-                     seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
-
-             with gr.Tab("Image to image"):
-                 with gr.Group():
-                     image = gr.Image(label="Image", height=256, tool="editor", type="pil")
-                     strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
-
-     if is_colab:
-         model_name.change(on_model_change, inputs=model_name, outputs=[custom_model_group, prompt], queue=False)
-         custom_model_path.change(custom_model_changed, inputs=custom_model_path, outputs=None)
-     # n_images.change(lambda n: gr.Gallery().style(grid=[2 if n > 1 else 1], height="auto"), inputs=n_images, outputs=gallery)
-     steps.change(on_steps_change, inputs=[steps], outputs=[], queue=False)
-
-     inputs = [model_name, prompt, guidance, steps, n_images, width, height, seed, image, strength, neg_prompt]
      outputs = [gallery, error_output]
      prompt.submit(inference, inputs=inputs, outputs=outputs)
      generate.click(inference, inputs=inputs, outputs=outputs)

-     ex = gr.Examples([
-         [models[2].name, "Brad Pitt with sunglasses, highly realistic", 7.5, 25],
-         [models[0].name, "portrait of a beautiful alyx vance half life", 10, 25],
-     ], inputs=[model_name, prompt, guidance, steps], outputs=outputs, fn=inference, cache_examples=False)

-     gr.HTML("""
      <div style="border-top: 1px solid #303030;">
      <br>
      <p>Models by <a href="https://huggingface.co/darkstorm2150">@darkstorm2150</a> and others. ❤️</p>
@@ -311,12 +343,10 @@ with gr.Blocks(css="style.css") as demo:
      <p>Space by: Darkstorm (Victor Espinoza)<br>
      <a href="https://www.instagram.com/officialvictorespinoza/">Instagram</a>
      </div>
-     """)
-
-     demo.load(update_state_info, inputs=state_info, outputs=state_info, every=0.5, show_progress=False)

  print(f"Space built in {time.time() - start_time:.2f} seconds")

- # if not is_colab:
  demo.queue(concurrency_count=1)
- demo.launch(debug=is_colab, share=is_colab)

+ from diffusers import (
+     StableDiffusionPipeline,
+     StableDiffusionImg2ImgPipeline,
+     DPMSolverMultistepScheduler,
+ )
  import gradio as gr
  import torch
  from PIL import Image
  import time
  import psutil
  import random


  start_time = time.time()
  current_steps = 25

+
  class Model:
      def __init__(self, name, path=""):
          self.name = name
          self.path = path

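+         # Both pipelines are created once at startup and share the same
+         # underlying components, so each checkpoint is loaded only once.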
+         if path != "":
+             self.pipe_t2i = StableDiffusionPipeline.from_pretrained(
+                 path, torch_dtype=torch.float16
+             )
+             self.pipe_t2i.scheduler = DPMSolverMultistepScheduler.from_config(
+                 self.pipe_t2i.scheduler.config
+             )
+             self.pipe_i2i = StableDiffusionImg2ImgPipeline(
+                 **self.pipe_t2i.components
+             )
+         else:
+             self.pipe_t2i = None
+             self.pipe_i2i = None

+ models = [
+     Model("2.2", "darkstorm2150/Protogen_v2.2_Official_Release"),
+     Model("3.4", "darkstorm2150/Protogen_x3.4_Official_Release"),
+     # Model("5.3", "darkstorm2150/Protogen_v5.3_Official_Release"),
+     # Model("5.8", "darkstorm2150/Protogen_x5.8_Official_Release"),
+     # Model("Dragon", "darkstorm2150/Protogen_Dragon_Official_Release"),
+ ]

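+ # Map dropdown names to Model instances for the inference helpers below.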
+ MODELS = {m.name: m for m in models}
 
 
+ device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"

+ # if torch.cuda.is_available():
+ #     pipe = pipe.to("cuda")
+ #     pipe.enable_xformers_memory_efficient_attention()

+ def error_str(error, title="Error"):
+     return (
+         f"""#### {title}
+ {error}"""
+         if error
+         else ""
+     )


+ def inference(
+     model_name,
+     prompt,
+     guidance,
+     steps,
+     n_images=1,
+     width=512,
+     height=512,
+     seed=0,
+     img=None,
+     strength=0.5,
+     neg_prompt="",
+ ):
+
+     print(psutil.virtual_memory())  # print memory usage
+
+     if seed == 0:
+         seed = random.randint(0, 2147483647)
+
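+     # Fixing the generator seed makes a run reproducible; the seed is
+     # reported back to the UI ("Done. Seed: ...") so it can be reused.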
+     generator = torch.Generator(
+         "cuda" if torch.cuda.is_available() else "cpu"
+     ).manual_seed(seed)
+
+     try:
+         if img is not None:
+             return (
+                 img_to_img(
+                     model_name,
+                     prompt,
+                     n_images,
+                     neg_prompt,
+                     img,
+                     strength,
+                     guidance,
+                     steps,
+                     width,
+                     height,
+                     generator,
+                     seed,
+                 ),
+                 f"Done. Seed: {seed}",
+             )
          else:
+             return (
+                 txt_to_img(
+                     model_name,
+                     prompt,
+                     n_images,
+                     neg_prompt,
+                     guidance,
+                     steps,
+                     width,
+                     height,
+                     generator,
+                     seed,
+                 ),
+                 f"Done. Seed: {seed}",
+             )
+     except Exception as e:
+         return None, error_str(e)
124
+
125
+ def txt_to_img(
126
+ model_name,
127
+ prompt,
128
+ n_images,
129
+ neg_prompt,
130
+ guidance,
131
+ steps,
132
+ width,
133
+ height,
134
+ generator,
135
+ seed,
136
+ ):
137
+ pipe = MODELS[model_name].pipe_t2i
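+
+     # Pipelines rest on the CPU between requests; borrow the GPU for this
+     # call only (it is released again via pipe.to("cpu") below).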
+     if torch.cuda.is_available():
+         pipe = pipe.to("cuda")
+         pipe.enable_xformers_memory_efficient_attention()

      result = pipe(
+         prompt,
+         negative_prompt=neg_prompt,
+         num_images_per_prompt=n_images,
+         num_inference_steps=int(steps),
+         guidance_scale=guidance,
+         width=width,
+         height=height,
+         generator=generator,
+     )

+     pipe.to("cpu")

+     return replace_nsfw_images(result)


+ def img_to_img(
+     model_name,
+     prompt,
+     n_images,
+     neg_prompt,
+     img,
+     strength,
+     guidance,
+     steps,
+     width,
+     height,
+     generator,
+     seed,
+ ):
+     pipe = MODELS[model_name].pipe_i2i
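+
+     # Same GPU handling as in txt_to_img: move over for this call only.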
+     if torch.cuda.is_available():
+         pipe = pipe.to("cuda")
+         pipe.enable_xformers_memory_efficient_attention()

      ratio = min(height / img.height, width / img.width)
      img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
+
      result = pipe(
          prompt,
+         negative_prompt=neg_prompt,
          num_images_per_prompt=n_images,
+         image=img,
+         num_inference_steps=int(steps),
+         strength=strength,
+         guidance_scale=guidance,
+         generator=generator,
+     )
+
+     pipe.to("cpu")
+
      return replace_nsfw_images(result)


+ def replace_nsfw_images(results):
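+     # Swap any image flagged by the safety checker for a placeholder.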
      for i in range(len(results.images)):
+         if results.nsfw_content_detected[i]:
+             results.images[i] = Image.open("nsfw.png")
      return results.images

+
  with gr.Blocks(css="style.css") as demo:
      gr.HTML(
+         """
          <div class="finetuned-diffusion-div">
          <div>
          <h1>Protogen Diffusion</h1>
          </div>
          <p>
+         Demo for multiple fine-tuned Protogen Stable Diffusion models.
          </p>
          <p>You can also duplicate this space and upgrade to gpu by going to settings:<br>
          <a style="display:inline-block" href="https://huggingface.co/spaces/patrickvonplaten/finetuned_diffusion?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r059733psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></p>
      """
      )
      with gr.Row():
+         with gr.Column(scale=55):
              with gr.Group():
+                 model_name = gr.Dropdown(
+                     label="Model",
+                     choices=[m.name for m in models],
+                     value=models[0].name,
+                 )
+                 with gr.Box(visible=False) as custom_model_group:
+                     custom_model_path = gr.Textbox(
+                         label="Custom model path",
+                         placeholder="Path to model, e.g. darkstorm2150/Protogen_x3.4_Official_Release",
+                         interactive=True,
+                     )
+                     gr.HTML(
+                         "<div><font size='2'>Custom models have to be downloaded first, so give it some time.</font></div>"
+                     )
+
+             with gr.Row():
+                 prompt = gr.Textbox(
+                     label="Prompt",
+                     show_label=False,
+                     max_lines=2,
+                     placeholder="Enter prompt.",
+                 ).style(container=False)
+                 generate = gr.Button(value="Generate").style(
+                     rounded=(False, True, True, False)
+                 )
+
+             # image_out = gr.Image(height=512)
+             gallery = gr.Gallery(
+                 label="Generated images", show_label=False, elem_id="gallery"
+             ).style(grid=[2], height="auto")
+
+             state_info = gr.Textbox(label="State", show_label=False, max_lines=2).style(
+                 container=False
+             )
+             error_output = gr.Markdown()
 
+         with gr.Column(scale=45):
+             with gr.Tab("Options"):
+                 with gr.Group():
+                     neg_prompt = gr.Textbox(
+                         label="Negative prompt",
+                         placeholder="What to exclude from the image",
+                     )
+
+                     n_images = gr.Slider(
+                         label="Images", value=1, minimum=1, maximum=4, step=1
+                     )
+
+                     with gr.Row():
+                         guidance = gr.Slider(
+                             label="Guidance scale", value=7.5, maximum=15
+                         )
+                         steps = gr.Slider(
+                             label="Steps",
+                             value=current_steps,
+                             minimum=2,
+                             maximum=75,
+                             step=1,
+                         )
+
+                     with gr.Row():
+                         width = gr.Slider(
+                             label="Width", value=512, minimum=64, maximum=1024, step=8
+                         )
+                         height = gr.Slider(
+                             label="Height", value=512, minimum=64, maximum=1024, step=8
+                         )
+
+                     seed = gr.Slider(
+                         0, 2147483647, label="Seed (0 = random)", value=0, step=1
+                     )
+
+             with gr.Tab("Image to image"):
+                 with gr.Group():
+                     image = gr.Image(
+                         label="Image", height=256, tool="editor", type="pil"
+                     )
+                     strength = gr.Slider(
+                         label="Transformation strength",
+                         minimum=0,
+                         maximum=1,
+                         step=0.01,
+                         value=0.5,
+                     )
+
+     inputs = [
+         model_name,
+         prompt,
+         guidance,
+         steps,
+         n_images,
+         width,
+         height,
+         seed,
+         image,
+         strength,
+         neg_prompt,
+     ]
      outputs = [gallery, error_output]
      prompt.submit(inference, inputs=inputs, outputs=outputs)
      generate.click(inference, inputs=inputs, outputs=outputs)

+     ex = gr.Examples(
+         [
+             [models[1].name, "Brad Pitt with sunglasses, highly realistic", 7.5, 25],
+             [models[0].name, "portrait of a beautiful alyx vance half life", 10, 25],
+         ],
+         inputs=[model_name, prompt, guidance, steps],
+         outputs=outputs,
+         fn=inference,
+         cache_examples=False,
+     )
 
+     gr.HTML(
+         """
      <div style="border-top: 1px solid #303030;">
      <br>
      <p>Models by <a href="https://huggingface.co/darkstorm2150">@darkstorm2150</a> and others. ❤️</p>
      <p>Space by: Darkstorm (Victor Espinoza)<br>
      <a href="https://www.instagram.com/officialvictorespinoza/">Instagram</a>
      </div>
+     """
+     )
 

  print(f"Space built in {time.time() - start_time:.2f} seconds")

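+ # A single queue worker: the pipelines are shared module-level state, so
+ # requests are processed one at a time.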
  demo.queue(concurrency_count=1)
+ demo.launch()