Boboiazumi committed on
Commit
943620a
1 Parent(s): 31e2186

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +107 -35
app.py CHANGED
@@ -48,6 +48,11 @@ def load_pipeline(model_name):
48
  StableDiffusionXLPipeline.from_single_file
49
  if MODEL.endswith(".safetensors")
50
  else StableDiffusionXLPipeline.from_pretrained
 
 
 
 
 
51
  )
52
 
53
  pipe = pipeline(
@@ -60,9 +65,28 @@ def load_pipeline(model_name):
60
  use_auth_token=HF_TOKEN,
61
  )
62
 
 
 
 
 
 
 
 
 
 
 
63
  pipe.to(device)
64
- return pipe
 
65
 
 
 
 
 
 
 
 
 
66
 
67
  @spaces.GPU
68
  def generate(
@@ -82,6 +106,7 @@ def generate(
82
  upscale_by: float = 1.5,
83
  add_quality_tags: bool = True,
84
  isImg2Img: bool = True,
 
85
 
86
  progress=gr.Progress(track_tqdm=True),
87
  ):
@@ -107,6 +132,9 @@ def generate(
107
  backup_scheduler = pipe.scheduler
108
  pipe.scheduler = utils.get_scheduler(pipe.scheduler.config, sampler)
109
 
 
 
 
110
  if use_upscaler:
111
  upscaler_pipe = StableDiffusionXLImg2ImgPipeline(**pipe.components)
112
  metadata = {
@@ -142,38 +170,77 @@ def generate(
142
 
143
  try:
144
  if use_upscaler:
145
- latents = pipe(
146
- prompt=prompt,
147
- negative_prompt=negative_prompt,
148
- width=width,
149
- height=height,
150
- guidance_scale=guidance_scale,
151
- num_inference_steps=num_inference_steps,
152
- generator=generator,
153
- output_type="latent",
154
- ).images
155
- upscaled_latents = utils.upscale(latents, "nearest-exact", upscale_by)
156
- images = upscaler_pipe(
157
- prompt=prompt,
158
- negative_prompt=negative_prompt,
159
- image=upscaled_latents,
160
- guidance_scale=guidance_scale,
161
- num_inference_steps=num_inference_steps,
162
- strength=upscaler_strength,
163
- generator=generator,
164
- output_type="pil",
165
- ).images
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
166
  else:
167
- images = pipe(
168
- prompt=prompt,
169
- negative_prompt=negative_prompt,
170
- width=width,
171
- height=height,
172
- guidance_scale=guidance_scale,
173
- num_inference_steps=num_inference_steps,
174
- generator=generator,
175
- output_type="pil",
176
- ).images
 
 
 
 
 
 
 
 
 
 
 
 
 
 
177
 
178
  if images:
179
  image_paths = [
@@ -192,14 +259,15 @@ def generate(
192
  if use_upscaler:
193
  del upscaler_pipe
194
  pipe.scheduler = backup_scheduler
 
195
  utils.free_memory()
196
 
197
 
198
  if torch.cuda.is_available():
199
- pipe = load_pipeline(MODEL)
200
  logger.info("Loaded on Device!")
201
  else:
202
- pipe = None
203
 
204
  styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in config.style_list}
205
  quality_prompt = {
@@ -241,7 +309,9 @@ with gr.Blocks(css="style.css", theme="NoCrypt/miku@1.2.1") as demo:
241
  )
242
  image = gr.Image(
243
  label="Image Input",
244
- visible=False
 
 
245
  )
246
  with gr.Accordion(label="Quality Tags", open=True):
247
  add_quality_tags = gr.Checkbox(
@@ -402,6 +472,8 @@ with gr.Blocks(css="style.css", theme="NoCrypt/miku@1.2.1") as demo:
402
  upscaler_strength,
403
  upscale_by,
404
  add_quality_tags,
 
 
405
  ],
406
  outputs=[result, gr_metadata],
407
  api_name="run",
 
48
  StableDiffusionXLPipeline.from_single_file
49
  if MODEL.endswith(".safetensors")
50
  else StableDiffusionXLPipeline.from_pretrained
51
+
52
+ img_pipeline = (
53
+ StableDiffusionXLImg2ImgPipeline.from_single_file
54
+ if MODEL.endswith(".safetensors")
55
+ else StableDiffusionXLImg2ImgPipeline.from_pretrained
56
  )
57
 
58
  pipe = pipeline(
 
65
  use_auth_token=HF_TOKEN,
66
  )
67
 
68
+ img_pipe = img_pipeline(
69
+ model_name,
70
+ vae=vae,
71
+ torch_dtype=torch.float16,
72
+ custom_pipeline="lpw_stable_diffusion_xl",
73
+ use_safetensors=True,
74
+ add_watermarker=False,
75
+ use_auth_token=HF_TOKEN,
76
+ )
77
+
78
  pipe.to(device)
79
+ img_pipe.to(device)
80
+ return pipe, img_pipe
81
 
82
def load_img(resize_width, img):
    """Open an image file and scale it to the given width, keeping aspect ratio.

    Args:
        resize_width: Target width in pixels (int).
        img: Filesystem path to the input image (Gradio `type="filepath"`).

    Returns:
        Tuple of (resized PIL.Image, resize_width, resize_height), where
        resize_height is the proportionally scaled height as an int.
    """
    img = Image.open(img)
    width, height = img.size
    scale = resize_width / width
    # PIL's Image.resize requires integer dimensions; the unrounded float
    # height would raise "TypeError: 'float' object cannot be interpreted
    # as an integer" at runtime.
    resize_height = int(round(height * scale))

    img = img.resize((resize_width, resize_height), Image.Resampling.LANCZOS)
    return img, resize_width, resize_height
90
 
91
  @spaces.GPU
92
  def generate(
 
106
  upscale_by: float = 1.5,
107
  add_quality_tags: bool = True,
108
  isImg2Img: bool = True,
109
+ img_path: str= ""
110
 
111
  progress=gr.Progress(track_tqdm=True),
112
  ):
 
132
  backup_scheduler = pipe.scheduler
133
  pipe.scheduler = utils.get_scheduler(pipe.scheduler.config, sampler)
134
 
135
+ img_backup_scheduler = img_pipe.scheduler
136
+ img_pipe.scheduler = utils.get_scheduler(img_pipe.scheduler.config, sampler)
137
+
138
  if use_upscaler:
139
  upscaler_pipe = StableDiffusionXLImg2ImgPipeline(**pipe.components)
140
  metadata = {
 
170
 
171
  try:
172
  if use_upscaler:
173
+ if isImg2Img:
174
+ img, img_width, img_height = load_img(512, img_path)
175
+ latents = img_pipe(
176
+ prompt=prompt,
177
+ negative_prompt=negative_prompt,
178
+ width=img_width,
179
+ height=img_height,
180
+ image=img,
181
+ guidance_scale=guidance_scale,
182
+ num_inference_steps=num_inference_steps,
183
+ generator=generator,
184
+ output_type="latent",
185
+ ).images
186
+ upscaled_latents = utils.upscale(latents, "nearest-exact", upscale_by)
187
+ images = upscaler_pipe(
188
+ prompt=prompt,
189
+ negative_prompt=negative_prompt,
190
+ image=upscaled_latents,
191
+ guidance_scale=guidance_scale,
192
+ num_inference_steps=num_inference_steps,
193
+ strength=upscaler_strength,
194
+ generator=generator,
195
+ output_type="pil",
196
+ ).images
197
+ else:
198
+ latents = pipe(
199
+ prompt=prompt,
200
+ negative_prompt=negative_prompt,
201
+ width=width,
202
+ height=height,
203
+ guidance_scale=guidance_scale,
204
+ num_inference_steps=num_inference_steps,
205
+ generator=generator,
206
+ output_type="latent",
207
+ ).images
208
+ upscaled_latents = utils.upscale(latents, "nearest-exact", upscale_by)
209
+ images = upscaler_pipe(
210
+ prompt=prompt,
211
+ negative_prompt=negative_prompt,
212
+ image=upscaled_latents,
213
+ guidance_scale=guidance_scale,
214
+ num_inference_steps=num_inference_steps,
215
+ strength=upscaler_strength,
216
+ generator=generator,
217
+ output_type="pil",
218
+ ).images
219
  else:
220
+ if isImg2Img:
221
+ img, img_width, img_height = load_img(512, img_path)
222
+ images = pipe(
223
+ prompt=prompt,
224
+ negative_prompt=negative_prompt,
225
+ width=img_width,
226
+ height=img_height,
227
+ image=img,
228
+ guidance_scale=guidance_scale,
229
+ num_inference_steps=num_inference_steps,
230
+ generator=generator,
231
+ output_type="pil",
232
+ ).images
233
+ else:
234
+ images = img_pipe(
235
+ prompt=prompt,
236
+ negative_prompt=negative_prompt,
237
+ width=width,
238
+ height=height,
239
+ guidance_scale=guidance_scale,
240
+ num_inference_steps=num_inference_steps,
241
+ generator=generator,
242
+ output_type="pil",
243
+ ).images
244
 
245
  if images:
246
  image_paths = [
 
259
  if use_upscaler:
260
  del upscaler_pipe
261
  pipe.scheduler = backup_scheduler
262
+ img_pipe.scheduler = img_backup_scheduler
263
  utils.free_memory()
264
 
265
 
266
  if torch.cuda.is_available():
267
+ pipe, img_pipe = load_pipeline(MODEL)
268
  logger.info("Loaded on Device!")
269
  else:
270
+ pipe, img_pipe = None, None
271
 
272
  styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in config.style_list}
273
  quality_prompt = {
 
309
  )
310
  image = gr.Image(
311
  label="Image Input",
312
+ visible=False,
313
+ source="upload",
314
+ type="filepath"
315
  )
316
  with gr.Accordion(label="Quality Tags", open=True):
317
  add_quality_tags = gr.Checkbox(
 
472
  upscaler_strength,
473
  upscale_by,
474
  add_quality_tags,
475
+ isImg2Img,
476
+ img_path,
477
  ],
478
  outputs=[result, gr_metadata],
479
  api_name="run",