Dao3 committed on
Commit
8e15d03
1 Parent(s): 6066892

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +115 -0
app.py CHANGED
@@ -138,7 +138,122 @@ with gr.Blocks(css='style.css') as demo:
138
  run.click(send_it1, inputs=[prompt, noise_level], outputs=[output1])
139
  run.click(send_it2, inputs=[prompt, noise_level], outputs=[output2])
140
 
 
141
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
142
 
143
  with gr.Row():
144
  gr.HTML(
 
138
  run.click(send_it1, inputs=[prompt, noise_level], outputs=[output1])
139
  run.click(send_it2, inputs=[prompt, noise_level], outputs=[output2])
140
 
141
def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator, seed):
    """Generate images from a text prompt with the currently selected model.

    Lazily (re)loads the text-to-image pipeline into the module-level ``pipe``
    whenever the requested model path or the last mode changed since the
    previous call; otherwise the cached pipeline is reused.

    Note: ``seed`` is accepted for interface parity but unused here — the RNG
    state travels inside ``generator``.
    """
    # Log model_path (not current_model.name) for consistency with img_to_img.
    print(f"{datetime.datetime.now()} txt_to_img, model: {model_path}")

    global last_mode
    global pipe
    global current_model_path

    if model_path != current_model_path or last_mode != "txt2img":
        update_state(f"Loading {current_model.name} text-to-image model...")

        if is_colab or current_model == custom_model:
            # Disable the NSFW filter for colab/custom runs.
            # NOTE(review): diffusers expects the safety checker to return a
            # *list* of flags; the scalar False only works because
            # replace_nsfw_images returns early on colab — TODO confirm.
            pipe = StableDiffusionPipeline.from_pretrained(
                model_path,
                torch_dtype=torch.float16,
                scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
                safety_checker=lambda images, clip_input: (images, False)
            )
        else:
            pipe = StableDiffusionPipeline.from_pretrained(
                model_path,
                torch_dtype=torch.float16,
                scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
            )

        if torch.cuda.is_available():
            pipe = pipe.to("cuda")
            pipe.enable_xformers_memory_efficient_attention()

        # Record the cache markers only AFTER the pipeline loaded successfully:
        # if from_pretrained raises, stale markers would make the next call
        # skip the reload and run against a wrong (or missing) pipeline.
        current_model_path = model_path
        last_mode = "txt2img"

    prompt = current_model.prefix + prompt
    result = pipe(
        prompt,
        negative_prompt=neg_prompt,
        num_images_per_prompt=n_images,
        num_inference_steps=int(steps),
        guidance_scale=guidance,
        width=width,
        height=height,
        generator=generator,
        callback=pipe_callback)

    return replace_nsfw_images(result)
189
+
190
def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator, seed):
    """Transform an input image guided by a text prompt (img2img).

    Lazily (re)loads the image-to-image pipeline into the module-level ``pipe``
    whenever the requested model path or the last mode changed since the
    previous call. The input image is downscaled (aspect-preserving) so it
    fits within ``width`` x ``height`` before being passed to the pipeline.

    Note: ``seed`` is accepted for interface parity but unused here — the RNG
    state travels inside ``generator``. ``width``/``height`` only bound the
    resize; they are not forwarded to the pipeline call.
    """
    print(f"{datetime.datetime.now()} img_to_img, model: {model_path}")

    global last_mode
    global pipe
    global current_model_path

    if model_path != current_model_path or last_mode != "img2img":
        update_state(f"Loading {current_model.name} image-to-image model...")

        if is_colab or current_model == custom_model:
            # Disable the NSFW filter for colab/custom runs.
            # NOTE(review): diffusers expects the safety checker to return a
            # *list* of flags; the scalar False only works because
            # replace_nsfw_images returns early on colab — TODO confirm.
            pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
                model_path,
                torch_dtype=torch.float16,
                scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
                safety_checker=lambda images, clip_input: (images, False)
            )
        else:
            pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
                model_path,
                torch_dtype=torch.float16,
                scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
            )

        if torch.cuda.is_available():
            pipe = pipe.to("cuda")
            pipe.enable_xformers_memory_efficient_attention()

        # Record the cache markers only AFTER the pipeline loaded successfully:
        # if from_pretrained raises, stale markers would make the next call
        # skip the reload and run against a wrong (or missing) pipeline.
        current_model_path = model_path
        last_mode = "img2img"

    prompt = current_model.prefix + prompt
    # Scale the image down (never up past the target box), preserving aspect.
    ratio = min(height / img.height, width / img.width)
    img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
    result = pipe(
        prompt,
        negative_prompt=neg_prompt,
        num_images_per_prompt=n_images,
        image=img,
        num_inference_steps=int(steps),
        strength=strength,
        guidance_scale=guidance,
        generator=generator,
        callback=pipe_callback)

    return replace_nsfw_images(result)
242
+
243
def replace_nsfw_images(results):
    """Return the result images, swapping any NSFW-flagged one for the placeholder.

    On colab the safety checker is disabled, so the images pass through
    untouched. Otherwise each image whose corresponding flag in
    ``results.nsfw_content_detected`` is set is replaced in place with the
    bundled "nsfw.png" placeholder.
    """
    if is_colab:
        return results.images

    images = results.images
    for idx, flagged in enumerate(results.nsfw_content_detected):
        if flagged:
            images[idx] = Image.open("nsfw.png")
    return images
252
+
253
+
254
+
255
+
256
+
257
 
258
  with gr.Row():
259
  gr.HTML(