LuChengTHU committed on
Commit 7cb93bd • 1 Parent(s): 42d89c8
Files changed (1)
  1. app.py +18 -13
app.py CHANGED
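The substance of the diff below is that a Hugging Face access token, read from the HUGGING_FACE_HUB_TOKEN environment variable, is threaded into every from_pretrained call so the Space can download checkpoints that require authentication. A minimal sketch of that pattern, using a placeholder model id rather than any of the models actually referenced in app.py:

    import os
    import torch
    from diffusers import StableDiffusionPipeline

    # Read the token from the environment (on Spaces this is typically set as a secret).
    # os.getenv returns None when the variable is unset, which falls back to anonymous access.
    auth_token = os.getenv("HUGGING_FACE_HUB_TOKEN")

    # "some-org/some-model" is a placeholder id for illustration only.
    pipe = StableDiffusionPipeline.from_pretrained(
        "some-org/some-model",
        torch_dtype=torch.float16,
        use_auth_token=auth_token,
    )

When the variable is unset, use_auth_token=None behaves like omitting the argument, so the same code still works for public models.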
@@ -2,6 +2,7 @@ from diffusers import AutoencoderKL, UNet2DConditionModel, StableDiffusionPipeli
 import gradio as gr
 import torch
 from PIL import Image
+import os
 
 scheduler = DPMSolverMultistepScheduler(
     beta_start=0.00085,
@@ -56,16 +57,18 @@ last_mode = "txt2img"
 current_model = models[1]
 current_model_path = current_model.path
 
+auth_token = os.getenv("HUGGING_FACE_HUB_TOKEN")
+
 if is_colab:
-  pipe = StableDiffusionPipeline.from_pretrained(current_model.path, torch_dtype=torch.float16, scheduler=scheduler)
+  pipe = StableDiffusionPipeline.from_pretrained(current_model.path, torch_dtype=torch.float16, scheduler=scheduler, use_auth_token=auth_token)
 
 else: # download all models
-  vae = AutoencoderKL.from_pretrained(current_model.path, subfolder="vae", torch_dtype=torch.float16)
+  vae = AutoencoderKL.from_pretrained(current_model.path, subfolder="vae", torch_dtype=torch.float16, use_auth_token=auth_token)
   for model in models[1:]:
     try:
-      unet = UNet2DConditionModel.from_pretrained(model.path, subfolder="unet", torch_dtype=torch.float16)
-      model.pipe_t2i = StableDiffusionPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
-      model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
+      unet = UNet2DConditionModel.from_pretrained(model.path, subfolder="unet", torch_dtype=torch.float16, use_auth_token=auth_token)
+      model.pipe_t2i = StableDiffusionPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler, use_auth_token=auth_token)
+      model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler, use_auth_token=auth_token)
     except:
      models.remove(model)
   pipe = models[1].pipe_t2i
@@ -104,7 +107,7 @@ def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, g
     current_model_path = model_path
 
     if is_colab or current_model == models[0]:
-      pipe = StableDiffusionPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16)
+      pipe = StableDiffusionPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, use_auth_token=auth_token)
     else:
       pipe.to("cpu")
       pipe = current_model.pipe_t2i
@@ -135,7 +138,7 @@ def img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, w
     current_model_path = model_path
 
     if is_colab or current_model == models[0]:
-      pipe = StableDiffusionImg2ImgPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16)
+      pipe = StableDiffusionImg2ImgPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, use_auth_token=auth_token)
     else:
       pipe.to("cpu")
       pipe = current_model.pipe_i2i
@@ -266,19 +269,21 @@ with gr.Blocks(css=css) as demo:
     prompt.submit(inference, inputs=inputs, outputs=image_out)
     generate.click(inference, inputs=inputs, outputs=image_out)
 
+
+    # TODO: the docs here are wrong.
     ex = gr.Examples([
-        [models[1].name, "jason bateman disassembling the demon core", 7.5, 50],
-        [models[4].name, "portrait of dwayne johnson", 7.0, 75],
-        [models[5].name, "portrait of a beautiful alyx vance half life", 10, 50],
-        [models[6].name, "Aloy from Horizon: Zero Dawn, half body portrait, smooth, detailed armor, beautiful face, illustration", 7.0, 45],
-        [models[5].name, "fantasy portrait painting, digital art", 4.0, 30],
+        [models[1+2].name, "jason bateman disassembling the demon core", 7.5, 50],
+        [models[4+2].name, "portrait of dwayne johnson", 7.0, 75],
+        [models[5+2].name, "portrait of a beautiful alyx vance half life", 10, 50],
+        [models[6+2].name, "Aloy from Horizon: Zero Dawn, half body portrait, smooth, detailed armor, beautiful face, illustration", 7.0, 45],
+        [models[5+2].name, "fantasy portrait painting, digital art", 4.0, 30],
     ], [model_name, prompt, guidance, steps, seed], image_out, inference, cache_examples=False)
 
     gr.Markdown('''
     Models by [@nitrosocke](https://huggingface.co/nitrosocke), [@haruu1367](https://twitter.com/haruu1367), [@Helixngc7293](https://twitter.com/DGSpitzer) and others. ❤️<br>
     Space by: [![Twitter Follow](https://img.shields.io/twitter/follow/hahahahohohe?label=%40anzorq&style=social)](https://twitter.com/hahahahohohe)
 
-    ![visitors](https://visitor-badge.glitch.me/badge?page_id=anzorq.finetuned_diffusion)
+    ![visitors](https://visitor-badge.glitch.me/badge?page_id=LuChengTHU.dpmsolver_sdm)
     ''')
 
 if not is_colab:
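The example rows now index the model list with an explicit +2 offset (models[1+2], models[4+2], and so on), and the added TODO notes that the surrounding docs no longer match, presumably because entries were added at the front of models in this fork. One way to keep example rows from breaking when the list changes is to look models up by name rather than position; a hedged sketch, where model_by_name, its fallback behaviour, and the example name string are illustrative and not taken from app.py:

    # Hypothetical helper: pick a model by its display name, falling back to the
    # first entry when the name is not found.
    def model_by_name(models, name):
        return next((m for m in models if m.name == name), models[0])

    # Example usage (the name string is a placeholder, not a real entry):
    # ex_model = model_by_name(models, "Some Fine-tuned Model")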