Cletrason committed on
Commit
ba0418b
1 Parent(s): 5a2859f

Delete app (1).py

Files changed (1)
  1. app (1).py +0 -349
app (1).py DELETED
@@ -1,349 +0,0 @@
- from diffusers import AutoencoderKL, UNet2DConditionModel, StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
- import gradio as gr
- import torch
- from PIL import Image
- import utils
- import datetime
- import time
- import psutil
- import random
-
-
- start_time = time.time()
- is_colab = utils.is_google_colab()
- state = None
- current_steps = 25
-
- class Model:
-     def __init__(self, name, path="", prefix=""):
-         self.name = name
-         self.path = path
-         self.prefix = prefix
-         self.pipe_t2i = None
-         self.pipe_i2i = None
-
- models = [
-     Model("Arcane", "nitrosocke/Arcane-Diffusion", "arcane style "),
-     Model("Dreamlike Diffusion 1.0", "dreamlike-art/dreamlike-diffusion-1.0", "dreamlikeart "),
-     Model("Archer", "nitrosocke/archer-diffusion", "archer style "),
-     Model("Anything V4", "andite/anything-v4.0", ""),
-     Model("Modern Disney", "nitrosocke/mo-di-diffusion", "modern disney style "),
-     Model("Classic Disney", "nitrosocke/classic-anim-diffusion", "classic disney style "),
-     Model("Loving Vincent (Van Gogh)", "dallinmackay/Van-Gogh-diffusion", "lvngvncnt "),
-     Model("Wavyfusion", "wavymulder/wavyfusion", "wa-vy style "),
-     Model("Analog Diffusion", "wavymulder/Analog-Diffusion", "analog style "),
-     Model("Redshift renderer (Cinema4D)", "nitrosocke/redshift-diffusion", "redshift style "),
-     Model("Midjourney v4 style", "prompthero/midjourney-v4-diffusion", "mdjrny-v4 style "),
-     Model("Waifu", "hakurei/waifu-diffusion"),
-     Model("Cyberpunk Anime", "DGSpitzer/Cyberpunk-Anime-Diffusion", "dgs illustration style "),
-     Model("Elden Ring", "nitrosocke/elden-ring-diffusion", "elden ring style "),
-     Model("TrinArt v2", "naclbit/trinart_stable_diffusion_v2"),
-     Model("Spider-Verse", "nitrosocke/spider-verse-diffusion", "spiderverse style "),
-     Model("Balloon Art", "Fictiverse/Stable_Diffusion_BalloonArt_Model", "BalloonArt "),
-     Model("Tron Legacy", "dallinmackay/Tron-Legacy-diffusion", "trnlgcy "),
-     Model("Pokémon", "lambdalabs/sd-pokemon-diffusers"),
-     Model("Pony Diffusion", "AstraliteHeart/pony-diffusion"),
-     Model("Robo Diffusion", "nousr/robo-diffusion"),
-     Model("Epic Diffusion", "johnslegers/epic-diffusion")
- ]
-
- custom_model = None
- if is_colab:
-     models.insert(0, Model("Custom model"))
-     custom_model = models[0]
-
- last_mode = "txt2img"
- current_model = models[1] if is_colab else models[0]
- current_model_path = current_model.path
-
- if is_colab:
-     pipe = StableDiffusionPipeline.from_pretrained(
-         current_model.path,
-         torch_dtype=torch.float16,
-         scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
-         safety_checker=lambda images, clip_input: (images, False)
-     )
-
- else:
-     pipe = StableDiffusionPipeline.from_pretrained(
-         current_model.path,
-         torch_dtype=torch.float16,
-         scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
-     )
-
- if torch.cuda.is_available():
-     pipe = pipe.to("cuda")
-     pipe.enable_xformers_memory_efficient_attention()
-
- device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
-
- def error_str(error, title="Error"):
-     return f"""#### {title}
-             {error}""" if error else ""
-
- def update_state(new_state):
-     global state
-     state = new_state
-
- def update_state_info(old_state):
-     if state and state != old_state:
-         return gr.update(value=state)
-
- def custom_model_changed(path):
-     models[0].path = path
-     global current_model
-     current_model = models[0]
-
- def on_model_change(model_name):
-
-     prefix = "Enter prompt. \"" + next((m.prefix for m in models if m.name == model_name), None) + "\" is prefixed automatically" if model_name != models[0].name else "Don't forget to use the custom model prefix in the prompt!"
-
-     return gr.update(visible = model_name == models[0].name), gr.update(placeholder=prefix)
-
- def on_steps_change(steps):
-     global current_steps
-     current_steps = steps
-
- def pipe_callback(step: int, timestep: int, latents: torch.FloatTensor):
-     update_state(f"{step}/{current_steps} steps")  # \nTime left, sec: {timestep/100:.0f}")
-
- def inference(model_name, prompt, guidance, steps, n_images=1, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt=""):
-
-     update_state(" ")
-
-     print(psutil.virtual_memory())  # print memory usage
-
-     global current_model
-     for model in models:
-         if model.name == model_name:
-             current_model = model
-             model_path = current_model.path
-
-     # generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
-     if seed == 0:
-         seed = random.randint(0, 2147483647)
-
-     # fall back to a CPU generator when no GPU is available
-     generator = torch.Generator('cuda' if torch.cuda.is_available() else 'cpu').manual_seed(seed)
-
-     try:
-         if img is not None:
-             return img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator, seed), f"Done. Seed: {seed}"
-         else:
-             return txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator, seed), f"Done. Seed: {seed}"
-     except Exception as e:
-         return None, error_str(e)
-
- def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator, seed):
-
-     print(f"{datetime.datetime.now()} txt_to_img, model: {current_model.name}")
-
-     global last_mode
-     global pipe
-     global current_model_path
-     if model_path != current_model_path or last_mode != "txt2img":
-         current_model_path = model_path
-
-         update_state(f"Loading {current_model.name} text-to-image model...")
-
-         if is_colab or current_model == custom_model:
-             pipe = StableDiffusionPipeline.from_pretrained(
-                 current_model_path,
-                 torch_dtype=torch.float16,
-                 scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
-                 safety_checker=lambda images, clip_input: (images, False)
-             )
-         else:
-             pipe = StableDiffusionPipeline.from_pretrained(
-                 current_model_path,
-                 torch_dtype=torch.float16,
-                 scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
-             )
-         # pipe = pipe.to("cpu")
-         # pipe = current_model.pipe_t2i
-
-         if torch.cuda.is_available():
-             pipe = pipe.to("cuda")
-             pipe.enable_xformers_memory_efficient_attention()
-         last_mode = "txt2img"
-
-     prompt = current_model.prefix + prompt
-     result = pipe(
-         prompt,
-         negative_prompt=neg_prompt,
-         num_images_per_prompt=n_images,
-         num_inference_steps=int(steps),
-         guidance_scale=guidance,
-         width=width,
-         height=height,
-         generator=generator,
-         callback=pipe_callback)
-
-     # update_state(f"Done. Seed: {seed}")
-
-     return replace_nsfw_images(result)
-
- def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator, seed):
-
-     print(f"{datetime.datetime.now()} img_to_img, model: {model_path}")
-
-     global last_mode
-     global pipe
-     global current_model_path
-     if model_path != current_model_path or last_mode != "img2img":
-         current_model_path = model_path
-
-         update_state(f"Loading {current_model.name} image-to-image model...")
-
-         if is_colab or current_model == custom_model:
-             pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
-                 current_model_path,
-                 torch_dtype=torch.float16,
-                 scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
-                 safety_checker=lambda images, clip_input: (images, False)
-             )
-         else:
-             pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
-                 current_model_path,
-                 torch_dtype=torch.float16,
-                 scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
-             )
-         # pipe = pipe.to("cpu")
-         # pipe = current_model.pipe_i2i
-
-         if torch.cuda.is_available():
-             pipe = pipe.to("cuda")
-             pipe.enable_xformers_memory_efficient_attention()
-         last_mode = "img2img"
-
-     prompt = current_model.prefix + prompt
-     ratio = min(height / img.height, width / img.width)
-     img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
-     result = pipe(
-         prompt,
-         negative_prompt=neg_prompt,
-         num_images_per_prompt=n_images,
-         image=img,
-         num_inference_steps=int(steps),
-         strength=strength,
-         guidance_scale=guidance,
-         # width=width,
-         # height=height,
-         generator=generator,
-         callback=pipe_callback)
-
-     # update_state(f"Done. Seed: {seed}")
-
-     return replace_nsfw_images(result)
-
- def replace_nsfw_images(results):
-
-     if is_colab:
-         return results.images
-
-     for i in range(len(results.images)):
-         if results.nsfw_content_detected[i]:
-             results.images[i] = Image.open("nsfw.png")
-     return results.images
-
- # css = """.finetuned-diffusion-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.finetuned-diffusion-div div h1{font-weight:900;margin-bottom:7px}.finetuned-diffusion-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
- # """
- with gr.Blocks(css="style.css") as demo:
-     gr.HTML(
-         f"""
-           <div class="finetuned-diffusion-div">
-             <div>
-               <h1>Finetuned Diffusion</h1>
-             </div>
-             <p>
-               Demo for multiple fine-tuned Stable Diffusion models, trained on different styles: <br>
-               <a href="https://huggingface.co/nitrosocke/Arcane-Diffusion">Arcane</a>, <a href="https://huggingface.co/nitrosocke/archer-diffusion">Archer</a>, <a href="https://huggingface.co/nitrosocke/elden-ring-diffusion">Elden Ring</a>, <a href="https://huggingface.co/nitrosocke/spider-verse-diffusion">Spider-Verse</a>, <a href="https://huggingface.co/nitrosocke/mo-di-diffusion">Modern Disney</a>, <a href="https://huggingface.co/nitrosocke/classic-anim-diffusion">Classic Disney</a>, <a href="https://huggingface.co/dallinmackay/Van-Gogh-diffusion">Loving Vincent (Van Gogh)</a>, <a href="https://huggingface.co/nitrosocke/redshift-diffusion">Redshift renderer (Cinema4D)</a>, <a href="https://huggingface.co/prompthero/midjourney-v4-diffusion">Midjourney v4 style</a>, <a href="https://huggingface.co/hakurei/waifu-diffusion">Waifu</a>, <a href="https://huggingface.co/lambdalabs/sd-pokemon-diffusers">Pokémon</a>, <a href="https://huggingface.co/AstraliteHeart/pony-diffusion">Pony Diffusion</a>, <a href="https://huggingface.co/nousr/robo-diffusion">Robo Diffusion</a>, <a href="https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion">Cyberpunk Anime</a>, <a href="https://huggingface.co/dallinmackay/Tron-Legacy-diffusion">Tron Legacy</a>, <a href="https://huggingface.co/Fictiverse/Stable_Diffusion_BalloonArt_Model">Balloon Art</a> + in the colab notebook you can load any other Diffusers 🧨 SD model hosted on HuggingFace 🤗.
-             </p>
-             <p>You can skip the queue and load custom models in the colab: <a href="https://colab.research.google.com/gist/qunash/42112fb104509c24fd3aa6d1c11dd6e0/copy-of-fine-tuned-diffusion-gradio.ipynb"><img data-canonical-src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab" src="https://camo.githubusercontent.com/84f0493939e0c4de4e6dbe113251b4bfb5353e57134ffd9fcab6b8714514d4d1/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667"></a></p>
-             <p>Running on <b>{device}</b>{(" in a <b>Google Colab</b>." if is_colab else "")}
-             </p>
-             <p>You can also duplicate this space and upgrade to a GPU by going to Settings:<br>
-             <a style="display:inline-block" href="https://huggingface.co/spaces/anzorq/finetuned_diffusion?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></p>
-           </div>
-         """
-     )
-     with gr.Row():
-
-         with gr.Column(scale=55):
-             with gr.Group():
-                 model_name = gr.Dropdown(label="Model", choices=[m.name for m in models], value=current_model.name)
-                 with gr.Box(visible=False) as custom_model_group:
-                     custom_model_path = gr.Textbox(label="Custom model path", placeholder="Path to model, e.g. nitrosocke/Arcane-Diffusion", interactive=True)
-                     gr.HTML("<div><font size='2'>Custom models have to be downloaded first, so give it some time.</font></div>")
-
-                 with gr.Row():
-                     prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2, placeholder="Enter prompt. Style applied automatically").style(container=False)
-                     generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))
-
-
-                 # image_out = gr.Image(height=512)
-                 gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[2], height="auto")
-
-             state_info = gr.Textbox(label="State", show_label=False, max_lines=2).style(container=False)
-             error_output = gr.Markdown()
-
-         with gr.Column(scale=45):
-             with gr.Tab("Options"):
-                 with gr.Group():
-                     neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
-
-                     n_images = gr.Slider(label="Images", value=1, minimum=1, maximum=4, step=1)
-
-                     with gr.Row():
-                         guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
-                         steps = gr.Slider(label="Steps", value=current_steps, minimum=2, maximum=75, step=1)
-
-                     with gr.Row():
-                         width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
-                         height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8)
-
-                     seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
-
-             with gr.Tab("Image to image"):
-                 with gr.Group():
-                     image = gr.Image(label="Image", height=256, tool="editor", type="pil")
-                     strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
-
-     if is_colab:
-         model_name.change(on_model_change, inputs=model_name, outputs=[custom_model_group, prompt], queue=False)
-         custom_model_path.change(custom_model_changed, inputs=custom_model_path, outputs=None)
-     # n_images.change(lambda n: gr.Gallery().style(grid=[2 if n > 1 else 1], height="auto"), inputs=n_images, outputs=gallery)
-     steps.change(on_steps_change, inputs=[steps], outputs=[], queue=False)
-
-     inputs = [model_name, prompt, guidance, steps, n_images, width, height, seed, image, strength, neg_prompt]
-     outputs = [gallery, error_output]
-     prompt.submit(inference, inputs=inputs, outputs=outputs)
-     generate.click(inference, inputs=inputs, outputs=outputs)
-
-     ex = gr.Examples([
-         [models[7].name, "tiny cute and adorable kitten adventurer dressed in a warm overcoat with survival gear on a winters day", 7.5, 25],
-         [models[4].name, "portrait of dwayne johnson", 7.0, 35],
-         [models[5].name, "portrait of a beautiful alyx vance half life", 10, 25],
-         [models[6].name, "Aloy from Horizon: Zero Dawn, half body portrait, smooth, detailed armor, beautiful face, illustration", 7.0, 30],
-         [models[5].name, "fantasy portrait painting, digital art", 4.0, 20],
-     ], inputs=[model_name, prompt, guidance, steps], outputs=outputs, fn=inference, cache_examples=False)
-
-     gr.HTML("""
-     <div style="border-top: 1px solid #303030;">
-       <br>
-       <p>Models by <a href="https://huggingface.co/nitrosocke">@nitrosocke</a>, <a href="https://twitter.com/haruu1367">@haruu1367</a>, <a href="https://twitter.com/DGSpitzer">@Helixngc7293</a>, <a href="https://twitter.com/dal_mack">@dal_mack</a>, <a href="https://twitter.com/prompthero">@prompthero</a> and others. ❤️</p>
-       <p>This space uses the <a href="https://github.com/LuChengTHU/dpm-solver">DPM-Solver++</a> sampler by <a href="https://arxiv.org/abs/2206.00927">Cheng Lu, et al.</a>.</p>
-       <p>Space by:<br>
-       <a href="https://twitter.com/hahahahohohe"><img src="https://img.shields.io/twitter/follow/hahahahohohe?label=%40anzorq&style=social" alt="Twitter Follow"></a><br>
-       <a href="https://github.com/qunash"><img alt="GitHub followers" src="https://img.shields.io/github/followers/qunash?style=social"></a></p><br><br>
-       <a href="https://www.buymeacoffee.com/anzorq" target="_blank"><img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me A Coffee" style="height: 45px !important;width: 162px !important;"></a><br><br>
-       <p><img src="https://visitor-badge.glitch.me/badge?page_id=anzorq.finetuned_diffusion" alt="visitors"></p>
-     </div>
-     """)
-
-     demo.load(update_state_info, inputs=state_info, outputs=state_info, every=0.5, show_progress=False)
-
- print(f"Space built in {time.time() - start_time:.2f} seconds")
-
- # if not is_colab:
- demo.queue(concurrency_count=1)
- demo.launch(debug=is_colab, share=is_colab)
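
For reference, the heart of the deleted file is its inference() function, which both the Generate button and the prompt box call. Below is a minimal sketch of driving it directly from Python, outside the Gradio UI. It is hypothetical usage, not part of this commit: it assumes the file is restored locally as app.py next to its utils module, that a GPU is available, and the model name and prompt are purely illustrative. Note that importing the module also builds and launches the Gradio demo, since that happens at module level.

import app  # restored copy of the deleted file; importing loads the default pipeline and launches the demo

images, status = app.inference(
    model_name="Arcane",              # any Model name defined in the file
    prompt="portrait of a knight",    # the "arcane style " prefix is added automatically
    guidance=7.5,
    steps=25,
)
print(status)                         # "Done. Seed: <n>" on success, otherwise a formatted error string
if images:
    images[0].save("out.png")         # results are returned as a list of PIL images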