alfredplpl committed
Commit 9d44341
1 Parent(s): b80f9f1

Update app.py

Files changed (1): app.py +12 -3
app.py CHANGED

@@ -29,7 +29,7 @@ pipe_i2i.enable_xformers_memory_efficient_attention()
 
 upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16)
 upscaler.enable_xformers_memory_efficient_attention()
-upscaler.to("cuda")
+
 
 if torch.cuda.is_available():
     pipe = pipe.to("cuda")
@@ -137,6 +137,7 @@ def auto_prompt_correction(prompt_ui,neg_prompt_ui,cool_japan_type_ui,disable_au
     return prompt,neg_prompt
 
 def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator,superreso=False):
+    global pipe, upscaler
     if(superreso):
         low_res_latents = pipe(
             prompt,
@@ -147,7 +148,8 @@ def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator,sup
             height = height,
             output_type="latent",
             generator = generator).images
-
+        pipe=pipe.to("cpu")
+        upscaler=upscaler.to("cuda")
         result = upscaler(
             prompt=prompt,
             negative_prompt = neg_prompt,
@@ -156,6 +158,9 @@ def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator,sup
             guidance_scale=guidance,
             generator=generator,
         )
+        result=result.to("cpu")
+        pipe=pipe.to("cuda")
+        upscaler=upscaler.to("cpu")
     else:
         result = pipe(
             prompt,
@@ -183,7 +188,8 @@ def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height
             #height = height,
             output_type="latent",
             generator = generator).images
-
+        pipe=pipe.to("cpu")
+        upscaler=upscaler.to("cuda")
         result = upscaler(
             prompt=prompt,
             negative_prompt = neg_prompt,
@@ -192,6 +198,9 @@ def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height
             guidance_scale=guidance,
             generator=generator,
         )
+        result=result.to("cpu")
+        pipe=pipe.to("cuda")
+        upscaler=upscaler.to("cpu")
     else:
         result = pipe_i2i(
             prompt,
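
What the change does, in short: the Space's GPU apparently cannot hold the base pipeline and the sd-x2-latent-upscaler at the same time, so the commit drops the unconditional upscaler.to("cuda") at startup and instead swaps the two pipelines between CPU and GPU around each super-resolution call, declaring them global so the reassignment sticks. Below is a minimal sketch of that device-swapping pattern, assuming diffusers' StableDiffusionPipeline and StableDiffusionLatentUpscalePipeline on a single CUDA device; the base model id and the helper name upscale_with_swap are placeholders for illustration, not taken from app.py.

import torch
from diffusers import StableDiffusionPipeline, StableDiffusionLatentUpscalePipeline

# Placeholder base model id; app.py loads its own checkpoints.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")  # only the base pipeline starts on the GPU

def upscale_with_swap(prompt, neg_prompt, generator):
    # Hypothetical helper illustrating the commit's pattern: generate latents,
    # then trade places so both pipelines never occupy VRAM at once.
    global pipe, upscaler
    low_res_latents = pipe(
        prompt,
        negative_prompt=neg_prompt,
        output_type="latent",
        generator=generator,
    ).images
    pipe = pipe.to("cpu")            # free VRAM for the upscaler
    upscaler = upscaler.to("cuda")
    image = upscaler(
        prompt=prompt,
        negative_prompt=neg_prompt,
        image=low_res_latents,
        num_inference_steps=20,
        generator=generator,
    ).images[0]
    upscaler = upscaler.to("cpu")    # swap back for the next request
    pipe = pipe.to("cuda")
    return image

The trade-off is a host-to-device transfer on every upscaled request in exchange for a peak VRAM footprint of roughly one pipeline at a time; if accelerate is installed, calling enable_model_cpu_offload() on each pipeline is a related alternative that offloads submodules automatically.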