SUPERSHANKY committed
Commit 74d13e4
Parent(s): 127a851

Update app.py

Files changed (1): app.py +0 -58

app.py CHANGED
@@ -8,12 +8,10 @@ import time
 import psutil
 import random
 
-
 start_time = time.time()
 is_colab = utils.is_google_colab()
 state = None
 current_steps = 25
-
 class Model:
     def __init__(self, name, path="", prefix=""):
         self.name = name
@@ -21,7 +19,6 @@ class Model:
         self.prefix = prefix
         self.pipe_t2i = None
         self.pipe_i2i = None
-
 models = [
     Model("Dreamlike Diffusion 1.0", "dreamlike-art/dreamlike-diffusion-1.0", "dreamlikeart "),
     Model("Dreamlike Photoreal 2.0", "dreamlike-art/dreamlike-photoreal-2.0", ""),
@@ -121,20 +118,14 @@ models = [
     Model("Realistic_Vision_V1.4", "SG161222/Realistic_Vision_V1.4", ""),
 
 
-
-
-
 ]
-
 custom_model = None
 if is_colab:
     models.insert(0, Model("Custom model"))
     custom_model = models[0]
-
 last_mode = "txt2img"
 current_model = models[1] if is_colab else models[0]
 current_model_path = current_model.path
-
 if is_colab:
     pipe = StableDiffusionPipeline.from_pretrained(
         current_model.path,
@@ -142,7 +133,6 @@ if is_colab:
         scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
         safety_checker=None
     )
-
 else:
     pipe = StableDiffusionPipeline.from_pretrained(
         current_model.path,
@@ -153,57 +143,41 @@ else:
 if torch.cuda.is_available():
     pipe = pipe.to("cuda")
     pipe.enable_xformers_memory_efficient_attention()
-
 device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
-
 def error_str(error, title="Error"):
     return f"""#### {title}
             {error}""" if error else ""
-
 def update_state(new_state):
     global state
     state = new_state
-
 def update_state_info(old_state):
     if state and state != old_state:
         return gr.update(value=state)
-
 def custom_model_changed(path):
     models[0].path = path
     global current_model
     current_model = models[0]
-
 def on_model_change(model_name):
 
     prefix = "Enter prompt. \"" + next((m.prefix for m in models if m.name == model_name), None) + "\" is prefixed automatically" if model_name != models[0].name else "Don't forget to use the custom model prefix in the prompt!"
-
     return gr.update(visible = model_name == models[0].name), gr.update(placeholder=prefix)
-
 def on_steps_change(steps):
     global current_steps
     current_steps = steps
-
 def pipe_callback(step: int, timestep: int, latents: torch.FloatTensor):
     update_state(f"{step}/{current_steps} steps")#\nTime left, sec: {timestep/100:.0f}")
-
 def inference(model_name, prompt, guidance, steps, n_images=1, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt=""):
-
     update_state(" ")
-
     print(psutil.virtual_memory()) # print memory usage
-
     global current_model
     for model in models:
         if model.name == model_name:
             current_model = model
             model_path = current_model.path
-
     # generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
     if seed == 0:
         seed = random.randint(0, 2147483647)
-
     generator = torch.Generator('cuda').manual_seed(seed)
-
     try:
         if img is not None:
             return img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator, seed), f"Done. Seed: {seed}"
@@ -211,19 +185,14 @@ def inference(model_name, prompt, guidance, steps, n_images=1, width=512, height
             return txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator, seed), f"Done. Seed: {seed}"
     except Exception as e:
         return None, error_str(e)
-
 def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator, seed):
-
     print(f"{datetime.datetime.now()} txt_to_img, model: {current_model.name}")
-
     global last_mode
     global pipe
     global current_model_path
     if model_path != current_model_path or last_mode != "txt2img":
         current_model_path = model_path
-
         update_state(f"Loading {current_model.name} text-to-image model...")
-
         if is_colab or current_model == custom_model:
             pipe = StableDiffusionPipeline.from_pretrained(
                 current_model_path,
@@ -239,12 +208,10 @@ def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width,
             )
         # pipe = pipe.to("cpu")
         # pipe = current_model.pipe_t2i
-
         if torch.cuda.is_available():
             pipe = pipe.to("cuda")
             pipe.enable_xformers_memory_efficient_attention()
         last_mode = "txt2img"
-
     prompt = current_model.prefix + prompt
     result = pipe(
         prompt,
@@ -256,23 +223,17 @@ def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width,
         height = height,
         generator = generator,
         callback=pipe_callback)
-
     # update_state(f"Done. Seed: {seed}")
 
     return replace_nsfw_images(result)
-
 def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator, seed):
-
     print(f"{datetime.datetime.now()} img_to_img, model: {model_path}")
-
     global last_mode
     global pipe
     global current_model_path
     if model_path != current_model_path or last_mode != "img2img":
         current_model_path = model_path
-
         update_state(f"Loading {current_model.name} image-to-image model...")
-
         if is_colab or current_model == custom_model:
             pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
                 current_model_path,
@@ -293,7 +254,6 @@ def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance
             pipe = pipe.to("cuda")
             pipe.enable_xformers_memory_efficient_attention()
         last_mode = "img2img"
-
     prompt = current_model.prefix + prompt
     ratio = min(height / img.height, width / img.width)
     img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
@@ -309,13 +269,10 @@ def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance
         # height = height,
         generator = generator,
         callback=pipe_callback)
-
     # update_state(f"Done. Seed: {seed}")
 
     return replace_nsfw_images(result)
-
 def replace_nsfw_images(results):
-
     if is_colab:
         return results.images
 
@@ -323,7 +280,6 @@ def replace_nsfw_images(results):
         if results.nsfw_content_detected[i]:
             results.images[i] = Image.open("nsfw.png")
     return results.images
-
 # css = """.finetuned-diffusion-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.finetuned-diffusion-div div h1{font-weight:900;margin-bottom:7px}.finetuned-diffusion-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
 # """
 with gr.Blocks(css="style.css") as demo:
@@ -358,46 +314,36 @@ with gr.Blocks(css="style.css") as demo:
                     prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2, placeholder="Enter prompt. Style applied automatically").style(container=False)
                     generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))
 
-
                 # image_out = gr.Image(height=512)
                 gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[2], height="auto")
 
            state_info = gr.Textbox(label="State", show_label=False, max_lines=2).style(container=False)
            error_output = gr.Markdown()
-
         with gr.Column(scale=45):
             with gr.Tab("Options"):
                 with gr.Group():
                     neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
-
                     n_images = gr.Slider(label="Images", value=1, minimum=1, maximum=10, step=1)
-
                     with gr.Row():
                         guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
                         steps = gr.Slider(label="Steps", value=current_steps, minimum=2, maximum=250, step=1)
-
                     with gr.Row():
                         width = gr.Slider(label="Width", value=512, minimum=64, maximum=2048, step=8)
                         height = gr.Slider(label="Height", value=512, minimum=64, maximum=2048, step=8)
-
                     seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
-
             with gr.Tab("Image to image"):
                 with gr.Group():
                     image = gr.Image(label="Image", height=256, tool="editor", type="pil")
                     strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
-
     if is_colab:
         model_name.change(on_model_change, inputs=model_name, outputs=[custom_model_group, prompt], queue=False)
         custom_model_path.change(custom_model_changed, inputs=custom_model_path, outputs=None)
     # n_images.change(lambda n: gr.Gallery().style(grid=[2 if n > 1 else 1], height="auto"), inputs=n_images, outputs=gallery)
     steps.change(on_steps_change, inputs=[steps], outputs=[], queue=False)
-
     inputs = [model_name, prompt, guidance, steps, n_images, width, height, seed, image, strength, neg_prompt]
     outputs = [gallery, error_output]
     prompt.submit(inference, inputs=inputs, outputs=outputs)
     generate.click(inference, inputs=inputs, outputs=outputs)
-
     ex = gr.Examples([
         [models[7].name, "tiny cute and adorable kitten adventurer dressed in a warm overcoat with survival gear on a winters day", 7.5, 25],
         [models[4].name, "portrait of dwayne johnson", 7.0, 35],
@@ -405,7 +351,6 @@ with gr.Blocks(css="style.css") as demo:
         [models[6].name, "Aloy from Horizon: Zero Dawn, half body portrait, smooth, detailed armor, beautiful face, illustration", 7.0, 30],
         [models[5].name, "fantasy portrait painting, digital art", 4.0, 20],
     ], inputs=[model_name, prompt, guidance, steps], outputs=outputs, fn=inference, cache_examples=False)
-
     gr.HTML("""
     <div style="border-top: 1px solid #303030;">
     <br>
@@ -418,11 +363,8 @@ with gr.Blocks(css="style.css") as demo:
     <p><img src="https://visitor-badge.glitch.me/badge?page_id=anzorq.finetuned_diffusion" alt="visitors"></p>
    </div>
    """)
-
    demo.load(update_state_info, inputs=state_info, outputs=state_info, every=0.5, show_progress=False)
-
 print(f"Space built in {time.time() - start_time:.2f} seconds")
-
 # if not is_colab:
 demo.queue(concurrency_count=1)
 demo.launch(debug=is_colab, share=True)
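
Note on the seed handling in inference() above, which is easy to miss among the whitespace changes: a seed of 0 is replaced with a random value in [0, 2147483647] before the torch.Generator is built, so the UI can always report a concrete, reproducible seed. A minimal standalone sketch of that pattern follows; the CPU fallback is an assumption added here for illustration, since app.py itself always constructs the generator on 'cuda':

# Standalone sketch of the seeding pattern from inference().
# The device fallback is an assumption; app.py pins the generator to 'cuda'.
import random
import torch

def make_generator(seed: int = 0):
    if seed == 0:
        seed = random.randint(0, 2147483647)  # 0 means "pick a random seed"
    device = "cuda" if torch.cuda.is_available() else "cpu"
    generator = torch.Generator(device).manual_seed(seed)
    return generator, seed

generator, seed = make_generator()
print(f"Done. Seed: {seed}")  # mirrors the app's status message

Returning the seed alongside the generator is what lets the app append "Done. Seed: {seed}" to every result, so a user can rerun with the same seed.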
 
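Similarly, the lazy model switching in txt_to_img()/img_to_img() repeats the load sequence the diff shows at startup: build the pipeline with a DPMSolverMultistepScheduler, move it to CUDA when available, and enable xformers memory-efficient attention. A hedged sketch of that sequence with one model id taken from the app's model list; the torch_dtype argument is an assumption on my part, since the diff elides the remaining keyword arguments of from_pretrained:

# Sketch of the pipeline load sequence, under the assumptions noted above.
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

model_id = "dreamlike-art/dreamlike-diffusion-1.0"  # from the app's models list

pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,  # assumed dtype
    scheduler=DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler"),
    safety_checker=None,
)
if torch.cuda.is_available():
    pipe = pipe.to("cuda")
    pipe.enable_xformers_memory_efficient_attention()  # requires xformers to be installed

# "dreamlikeart " is this model's prompt prefix, which the app prepends automatically.
image = pipe("dreamlikeart portrait of a kitten adventurer", num_inference_steps=25).images[0]
image.save("out.png")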