Manjushri committed on
Commit
fdf21aa
1 Parent(s): 66f23ad

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -196
app.py CHANGED
@@ -51,165 +51,6 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
51
  torch.cuda.empty_cache()
52
  return image
53
 
54
- if Model == "Anime":
55
- anime = DiffusionPipeline.from_pretrained("circulus/canvers-anime-v3.9.1", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("circulus/canvers-anime-v3.8.1")
56
- anime.enable_xformers_memory_efficient_attention()
57
- anime = anime.to(device)
58
- torch.cuda.empty_cache()
59
- if refine == "Yes":
60
- refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
61
- refiner.enable_xformers_memory_efficient_attention()
62
- refiner = refiner.to(device)
63
- torch.cuda.empty_cache()
64
- int_image = anime(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
65
- image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
66
- torch.cuda.empty_cache()
67
- if upscale == "Yes":
68
- refiner = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
69
- refiner.enable_xformers_memory_efficient_attention()
70
- refiner = refiner.to(device)
71
- torch.cuda.empty_cache()
72
- upscaled = refiner(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
73
- torch.cuda.empty_cache()
74
- return upscaled
75
- else:
76
- return image
77
- else:
78
- if upscale == "Yes":
79
- image = anime(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
80
- upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
81
- upscaler.enable_xformers_memory_efficient_attention()
82
- upscaler = upscaler.to(device)
83
- torch.cuda.empty_cache()
84
- upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
85
- torch.cuda.empty_cache()
86
- return upscaled
87
- else:
88
- image = anime(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
89
- torch.cuda.empty_cache()
90
- return image
91
-
92
- if Model == "Disney":
93
- disney = DiffusionPipeline.from_pretrained("circulus/canvers-disney-v3.9.1", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("circulus/canvers-disney-v3.8.1")
94
- disney.enable_xformers_memory_efficient_attention()
95
- disney = disney.to(device)
96
- torch.cuda.empty_cache()
97
- if refine == "Yes":
98
- refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
99
- refiner.enable_xformers_memory_efficient_attention()
100
- refiner = refiner.to(device)
101
- torch.cuda.empty_cache()
102
- int_image = disney(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
103
- image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
104
- torch.cuda.empty_cache()
105
-
106
- if upscale == "Yes":
107
- refiner = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
108
- refiner.enable_xformers_memory_efficient_attention()
109
- refiner = refiner.to(device)
110
- torch.cuda.empty_cache()
111
- upscaled = refiner(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
112
- torch.cuda.empty_cache()
113
- return upscaled
114
- else:
115
- return image
116
- else:
117
- if upscale == "Yes":
118
- image = disney(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
119
- upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
120
- upscaler.enable_xformers_memory_efficient_attention()
121
- upscaler = upscaler.to(device)
122
- torch.cuda.empty_cache()
123
- upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
124
- torch.cuda.empty_cache()
125
- return upscaled
126
- else:
127
- image = disney(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
128
- torch.cuda.empty_cache()
129
- return image
130
-
131
- if Model == "StoryBook":
132
- story = DiffusionPipeline.from_pretrained("circulus/canvers-story-v3.9.1", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("circulus/canvers-story-v3.8.1")
133
- story.enable_xformers_memory_efficient_attention()
134
- story = story.to(device)
135
- torch.cuda.empty_cache()
136
- if refine == "Yes":
137
- refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
138
- refiner.enable_xformers_memory_efficient_attention()
139
- refiner = refiner.to(device)
140
- torch.cuda.empty_cache()
141
- int_image = story(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
142
- image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
143
- torch.cuda.empty_cache()
144
-
145
- if upscale == "Yes":
146
- refiner = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
147
- refiner.enable_xformers_memory_efficient_attention()
148
- refiner = refiner.to(device)
149
- torch.cuda.empty_cache()
150
- upscaled = refiner(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
151
- torch.cuda.empty_cache()
152
- return upscaled
153
- else:
154
- return image
155
- else:
156
- if upscale == "Yes":
157
- image = story(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
158
-
159
- upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
160
- upscaler.enable_xformers_memory_efficient_attention()
161
- upscaler = upscaler.to(device)
162
- torch.cuda.empty_cache()
163
- upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
164
- torch.cuda.empty_cache()
165
- return upscaled
166
- else:
167
-
168
- image = story(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
169
- torch.cuda.empty_cache()
170
- return image
171
-
172
- if Model == "SemiReal":
173
- semi = DiffusionPipeline.from_pretrained("circulus/canvers-semi-v3.8.1", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("circulus/canvers-semi-v3.8.1")
174
- semi.enable_xformers_memory_efficient_attention()
175
- semi = semi.to(device)
176
- torch.cuda.empty_cache()
177
- if refine == "Yes":
178
- refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
179
- refiner.enable_xformers_memory_efficient_attention()
180
- refiner = refiner.to(device)
181
- torch.cuda.empty_cache()
182
- image = semi(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
183
- image = refiner(Prompt, negative_prompt=negative_prompt, image=image, denoising_start=high_noise_frac).images[0]
184
- torch.cuda.empty_cache()
185
-
186
- if upscale == "Yes":
187
- refiner = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
188
- refiner.enable_xformers_memory_efficient_attention()
189
- refiner = refiner.to(device)
190
- torch.cuda.empty_cache()
191
- upscaled = refiner(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
192
- torch.cuda.empty_cache()
193
- return upscaled
194
- else:
195
- return image
196
- else:
197
- if upscale == "Yes":
198
- image = semi(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
199
-
200
- upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
201
- upscaler.enable_xformers_memory_efficient_attention()
202
- upscaler = upscaler.to(device)
203
- torch.cuda.empty_cache()
204
- upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
205
- torch.cuda.empty_cache()
206
- return upscaled
207
- else:
208
-
209
- image = semi(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
210
- torch.cuda.empty_cache()
211
- return image
212
-
213
  if Model == "Animagine XL 3.0":
214
  animagine = DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-3.0", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-3.0")
215
  animagine.enable_xformers_memory_efficient_attention()
@@ -349,44 +190,10 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
349
  image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
350
  torch.cuda.empty_cache()
351
  return image
352
- if Model == 'SDXL-Turbo':
353
- torch.cuda.empty_cache()
354
- pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16")
355
- pipe.enable_xformers_memory_efficient_attention()
356
- pipe = pipe.to(device)
357
- image = pipe(prompt=Prompt, num_inference_steps=1, guidance_scale=0.0).images[0]
358
- if refine == "Yes":
359
- refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
360
- refiner.enable_xformers_memory_efficient_attention()
361
- refiner = refiner.to(device)
362
- torch.cuda.empty_cache()
363
- refined = refiner(Prompt, negative_prompt=negative_prompt, image=image, denoising_start=high_noise_frac).images[0]
364
- torch.cuda.empty_cache()
365
- if upscale == 'Yes':
366
- upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
367
- upscaler.enable_xformers_memory_efficient_attention()
368
- upscaler = upscaler.to(device)
369
- torch.cuda.empty_cache()
370
- upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=refined, num_inference_steps=5, guidance_scale=0).images[0]
371
- torch.cuda.empty_cache()
372
- return upscaled
373
- else:
374
- return refined
375
-
376
- if upscale == "Yes":
377
- upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
378
- upscaler.enable_xformers_memory_efficient_attention()
379
- upscaler = upscaler.to(device)
380
- torch.cuda.empty_cache()
381
- upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
382
- torch.cuda.empty_cache()
383
- return upscaled
384
- else:
385
- image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
386
- torch.cuda.empty_cache()
387
  return image
388
 
389
- gr.Interface(fn=genie, inputs=[gr.Radio(['PhotoReal', 'Anime', 'Disney', 'StoryBook', 'SemiReal', 'Animagine XL 3.0', 'SDXL 1.0', 'FusionXL', 'SDXL-Turbo'], value='PhotoReal', label='Choose Model'),
390
  gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
391
  gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
392
  gr.Slider(512, 1024, 768, step=128, label='Height'),
@@ -398,6 +205,6 @@ gr.Interface(fn=genie, inputs=[gr.Radio(['PhotoReal', 'Anime', 'Disney', 'StoryB
398
  gr.Slider(minimum=.9, maximum=.99, value=.95, step=.01, label='Refiner Denoise Start %'),
399
  gr.Radio(["Yes", "No"], label = 'SD X2 Latent Upscaler?', value="No")],
400
  outputs=gr.Image(label='Generated Image'),
401
- title="Manju Dream Booth V1.9 with SDXL 1.0 Refiner and SD X2 Latent Upscaler - GPU",
402
  description="<br><br><b/>Warning: This Demo is capable of producing NSFW content.",
403
  article = "If You Enjoyed this Demo and would like to Donate, you can send any amount to any of these Wallets. <br><br>SHIB (BEP20): 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 <br>PayPal: https://www.paypal.me/ManjushriBodhisattva <br>ETH: 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 <br>DOGE: D9QdVPtcU1EFH8jDC8jhU9uBcSTqUiA8h6<br><br>Code Monkey: <a href=\"https://huggingface.co/Manjushri\">Manjushri</a>").launch(debug=True, max_threads=80)
 
51
  torch.cuda.empty_cache()
52
  return image
53
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54
  if Model == "Animagine XL 3.0":
55
  animagine = DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-3.0", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-3.0")
56
  animagine.enable_xformers_memory_efficient_attention()
 
190
  image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
191
  torch.cuda.empty_cache()
192
  return image
193
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
194
  return image
195
 
196
+ gr.Interface(fn=genie, inputs=[gr.Radio(['PhotoReal', 'Animagine XL 3.0', 'SDXL 1.0', 'FusionXL',], value='PhotoReal', label='Choose Model'),
197
  gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
198
  gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
199
  gr.Slider(512, 1024, 768, step=128, label='Height'),
 
205
  gr.Slider(minimum=.9, maximum=.99, value=.95, step=.01, label='Refiner Denoise Start %'),
206
  gr.Radio(["Yes", "No"], label = 'SD X2 Latent Upscaler?', value="No")],
207
  outputs=gr.Image(label='Generated Image'),
208
+ title="Manju Dream Booth V2.0 with SDXL 1.0 Refiner and SD X2 Latent Upscaler - GPU",
209
  description="<br><br><b/>Warning: This Demo is capable of producing NSFW content.",
210
  article = "If You Enjoyed this Demo and would like to Donate, you can send any amount to any of these Wallets. <br><br>SHIB (BEP20): 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 <br>PayPal: https://www.paypal.me/ManjushriBodhisattva <br>ETH: 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 <br>DOGE: D9QdVPtcU1EFH8jDC8jhU9uBcSTqUiA8h6<br><br>Code Monkey: <a href=\"https://huggingface.co/Manjushri\">Manjushri</a>").launch(debug=True, max_threads=80)