ChenWu98 committed
Commit 6da2189
1 Parent(s): 49c69e8

Update app.py

Files changed (1)
  1. app.py +17 -117
app.py CHANGED
@@ -1,4 +1,4 @@
- from diffusers import AutoencoderKL, UNet2DConditionModel, StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
+ from diffusers import CycleDiffusionPipeline, DDIMScheduler
  import gradio as gr
  import torch
  from PIL import Image
@@ -6,76 +6,17 @@ import utils

  is_colab = utils.is_google_colab()

- class Model:
-     def __init__(self, name, path, prefix):
-         self.name = name
-         self.path = path
-         self.prefix = prefix
-         self.pipe_t2i = None
-         self.pipe_i2i = None
-
- models = [
-     Model("Arcane", "nitrosocke/Arcane-Diffusion", "arcane style "),
-     Model("Archer", "nitrosocke/archer-diffusion", "archer style "),
-     Model("Elden Ring", "nitrosocke/elden-ring-diffusion", "elden ring style "),
-     Model("Spider-Verse", "nitrosocke/spider-verse-diffusion", "spiderverse style "),
-     Model("Modern Disney", "nitrosocke/mo-di-diffusion", "modern disney style "),
-     Model("Classic Disney", "nitrosocke/classic-anim-diffusion", "classic disney style "),
-     Model("Loving Vincent (Van Gogh)", "dallinmackay/Van-Gogh-diffusion", "lvngvncnt "),
-     Model("Redshift renderer (Cinema4D)", "nitrosocke/redshift-diffusion", "redshift style "),
-     Model("Midjourney v4 style", "prompthero/midjourney-v4-diffusion", "mdjrny-v4 style "),
-     Model("Waifu", "hakurei/waifu-diffusion", ""),
-     Model("Pokémon", "lambdalabs/sd-pokemon-diffusers", ""),
-     Model("Pony Diffusion", "AstraliteHeart/pony-diffusion", ""),
-     Model("Robo Diffusion", "nousr/robo-diffusion", ""),
-     Model("Cyberpunk Anime", "DGSpitzer/Cyberpunk-Anime-Diffusion", "dgs illustration style "),
-     Model("Tron Legacy", "dallinmackay/Tron-Legacy-diffusion", "trnlgcy ")
- ]
-
- scheduler = DPMSolverMultistepScheduler(
-     beta_start=0.00085,
-     beta_end=0.012,
-     beta_schedule="scaled_linear",
-     num_train_timesteps=1000,
-     trained_betas=None,
-     predict_epsilon=True,
-     thresholding=False,
-     algorithm_type="dpmsolver++",
-     solver_type="midpoint",
-     lower_order_final=True,
- )
-
- if is_colab:
-     models.insert(0, Model("Custom model", "", ""))
-     custom_model = models[0]
-
- last_mode = "txt2img"
- current_model = models[1] if is_colab else models[0]
- current_model_path = current_model.path
-
- if is_colab:
-     pipe = StableDiffusionPipeline.from_pretrained(current_model.path, torch_dtype=torch.float16, scheduler=scheduler)
-
- else: # download all models
-     vae = AutoencoderKL.from_pretrained(current_model.path, subfolder="vae", torch_dtype=torch.float16)
-     for model in models[1:]:
-         try:
-             unet = UNet2DConditionModel.from_pretrained(model.path, subfolder="unet", torch_dtype=torch.float16)
-             model.pipe_t2i = StableDiffusionPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
-             model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
-         except:
-             models.remove(model)
-     pipe = models[1].pipe_t2i
-
+ scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
+                           num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False)
+
+ model_id_or_path = "CompVis/stable-diffusion-v1-4"
+ pipe = CycleDiffusionPipeline.from_pretrained(model_id_or_path, scheduler=scheduler).to("cuda")
+
  if torch.cuda.is_available():
-     pipe = pipe.to("cuda")
+     pipe = pipe.to("cuda")

  device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"

- def custom_model_changed(path):
-     models[0].path = path
-     global current_model
-     current_model = models[0]

  def inference(model_name, prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", inpaint_image=None):

@@ -92,43 +33,6 @@ def inference(model_name, prompt, guidance, steps, width=512, height=512, seed=0
      else:
          return txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator, inpaint_image)

- def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator=None, inpaint_image=None):
-
-     global last_mode
-     global pipe
-     global current_model_path
-     if model_path != current_model_path or last_mode != "txt2img":
-         current_model_path = model_path
-
-         if is_colab or current_model == custom_model:
-             pipe = StableDiffusionPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler)
-         else:
-             pipe.to("cpu")
-             pipe = current_model.pipe_t2i
-
-         if torch.cuda.is_available():
-             pipe = pipe.to("cuda")
-         last_mode = "txt2img"
-
-     prompt = current_model.prefix + prompt
-
-     if inpaint_image is not None:
-         init_image = inpaint_image["image"].convert("RGB").resize((width, height))
-         mask = inpaint_image["mask"].convert("RGB").resize((width, height))
-
-     result = pipe(
-         prompt,
-         negative_prompt = neg_prompt,
-         # num_images_per_prompt=n_images,
-         image = init_image,
-         mask_image = mask,
-         num_inference_steps = int(steps),
-         guidance_scale = guidance,
-         width = width,
-         height = height,
-         generator = generator)
-
-     return replace_nsfw_images(result)

  def img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator=None):

@@ -165,12 +69,14 @@ def img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, w

      return replace_nsfw_images(result)

+
  def replace_nsfw_images(results):
      for i in range(len(results.images)):
-         if results.nsfw_content_detected[i]:
-             results.images[i] = Image.open("nsfw.png")
+         if results.nsfw_content_detected[i]:
+             results.images[i] = Image.open("nsfw.png")
      return results.images[0]

+
  css = """.finetuned-diffusion-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.finetuned-diffusion-div div h1{font-weight:900;margin-bottom:7px}.finetuned-diffusion-div p{margin-bottom:10px;font-size:94%}.finetuned-diffusion-div p a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
  """
  with gr.Blocks(css=css) as demo:
@@ -178,13 +84,12 @@ with gr.Blocks(css=css) as demo:
      f"""
        <div class="finetuned-diffusion-div">
          <div>
-           <h1>Finetuned Diffusion</h1>
+           <h1>CycleDiffusion with Stable Diffusion</h1>
          </div>
          <p>
-          Demo for multiple fine-tuned Stable Diffusion models, trained on different styles: <br>
-          <a href="https://huggingface.co/nitrosocke/Arcane-Diffusion">Arcane</a>, <a href="https://huggingface.co/nitrosocke/archer-diffusion">Archer</a>, <a href="https://huggingface.co/nitrosocke/elden-ring-diffusion">Elden Ring</a>, <a href="https://huggingface.co/nitrosocke/spider-verse-diffusion">Spider-Verse</a>, <a href="https://huggingface.co/nitrosocke/modern-disney-diffusion">Modern Disney</a>, <a href="https://huggingface.co/nitrosocke/classic-anim-diffusion">Classic Disney</a>, <a href="https://huggingface.co/hakurei/waifu-diffusion">Waifu</a>, <a href="https://huggingface.co/lambdalabs/sd-pokemon-diffusers">Pokémon</a>, <a href="https://huggingface.co/AstraliteHeart/pony-diffusion">Pony Diffusion</a>, <a href="https://huggingface.co/nousr/robo-diffusion">Robo Diffusion</a>, <a href="https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion">Cyberpunk Anime</a>, <a href="https://huggingface.co/dallinmackay/Tron-Legacy-diffusion">Tron Legacy</a> + any other custom Diffusers 🧨 SD model hosted on HuggingFace 🤗.
+          Demo for CycleDiffusion with Stable Diffusion, built with Diffusers 🧨 by HuggingFace 🤗.
          </p>
-        <p>You can skip the queue and load custom models in the colab: <a href="https://colab.research.google.com/gist/qunash/42112fb104509c24fd3aa6d1c11dd6e0/copy-of-fine-tuned-diffusion-gradio.ipynb"><img data-canonical-src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab" src="https://camo.githubusercontent.com/84f0493939e0c4de4e6dbe113251b4bfb5353e57134ffd9fcab6b8714514d4d1/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667"></a></p>
+        <p>You can skip the queue in the colab: <a href="https://colab.research.google.com/gist/qunash/42112fb104509c24fd3aa6d1c11dd6e0/copy-of-fine-tuned-diffusion-gradio.ipynb"><img data-canonical-src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab" src="https://camo.githubusercontent.com/84f0493939e0c4de4e6dbe113251b4bfb5353e57134ffd9fcab6b8714514d4d1/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667"></a></p>
        Running on <b>{device}</b>{(" in a <b>Google Colab</b>." if is_colab else "")}
        </p>
      </div>
@@ -194,16 +99,11 @@ with gr.Blocks(css=css) as demo:

          with gr.Column(scale=55):
              with gr.Group():
-                 model_name = gr.Dropdown(label="Model", choices=[m.name for m in models], value=current_model.name)
-                 with gr.Box(visible=False) as custom_model_group:
-                     custom_model_path = gr.Textbox(label="Custom model path", placeholder="Path to model, e.g. nitrosocke/Arcane-Diffusion", interactive=True)
-                     gr.HTML("<div><font size='2'>Custom models have to be downloaded first, so give it some time.</font></div>")
-
+
                  with gr.Row():
                      prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder="Enter prompt. Style applied automatically").style(container=False)
                      generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))

-
                  image_out = gr.Image(height=512)
                  # gallery = gr.Gallery(
                  # label="Generated images", show_label=False, elem_id="gallery"
@@ -259,5 +159,5 @@ with gr.Blocks(css=css) as demo:
      ''')

  if not is_colab:
-     demo.queue(concurrency_count=1)
+     demo.queue(concurrency_count=1)
  demo.launch(debug=is_colab, share=is_colab)
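
For context, the sketch below shows how the DDIMScheduler and CycleDiffusionPipeline set up in this commit are typically invoked, following the CycleDiffusion example in the diffusers documentation. It is not part of the commit: the prompts, file names, and parameter values are illustrative placeholders, and the actual Gradio handlers in app.py may forward different values.

import torch
from PIL import Image
from diffusers import CycleDiffusionPipeline, DDIMScheduler

# Scheduler settings copied from the commit: DDIM with clip_sample=False and
# set_alpha_to_one=False, matching the configuration used in the CycleDiffusion examples.
scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
                          num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False)
pipe = CycleDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler)
if torch.cuda.is_available():
    pipe = pipe.to("cuda")

# "input.png" is a placeholder; any RGB image resized to 512x512 works.
init_image = Image.open("input.png").convert("RGB").resize((512, 512))

# CycleDiffusion edits an image by pairing a source prompt (describing the input image)
# with a target prompt (describing the desired output).
result = pipe(
    prompt="An astronaut riding an elephant",     # target prompt (illustrative)
    source_prompt="An astronaut riding a horse",  # source prompt (illustrative)
    image=init_image,
    num_inference_steps=100,
    strength=0.8,             # how much of the diffusion trajectory to re-run
    guidance_scale=2,         # guidance for the target prompt
    source_guidance_scale=1,  # guidance for the source prompt
    eta=0.1,
)
result.images[0].save("output.png")

The keyword names (source_prompt, source_guidance_scale, strength, eta) follow the CycleDiffusionPipeline call signature in diffusers at the time of this commit, and result is the standard pipeline output whose images and nsfw_content_detected fields are what replace_nsfw_images in app.py inspects.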