Update app.py
app.py CHANGED
@@ -72,7 +72,7 @@ else:
     )
 
 if torch.cuda.is_available():
-    pipe = pipe.to("cuda")
+    pipe = pipe.to("cpu")
     pipe.enable_xformers_memory_efficient_attention()
 
 device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
@@ -123,7 +123,7 @@ def inference(model_name, prompt, guidance, steps, n_images=1, width=512, height
     if seed == 0:
         seed = random.randint(0, 2147483647)
 
-    generator = torch.Generator('cuda').manual_seed(seed)
+    generator = torch.Generator('cpu').manual_seed(seed)
 
     try:
         if img is not None:
@@ -162,7 +162,7 @@ def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width,
         # pipe = current_model.pipe_t2i
 
         if torch.cuda.is_available():
-            pipe = pipe.to("cuda")
+            pipe = pipe.to("cpu")
             pipe.enable_xformers_memory_efficient_attention()
         last_mode = "txt2img"
 
@@ -211,7 +211,7 @@ def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance
         # pipe = current_model.pipe_i2i
 
         if torch.cuda.is_available():
-            pipe = pipe.to("cuda")
+            pipe = pipe.to("cpu")
             pipe.enable_xformers_memory_efficient_attention()
         last_mode = "img2img"
 
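For reference, below is a minimal device-agnostic sketch of the pattern this commit hard-codes in several places: pick the device once from torch.cuda.is_available() and reuse it for the pipeline, the xformers toggle, and the seeded generator, so the same code runs on GPU and CPU without editing each call site. This is an illustration only, not the Space's actual app.py; the model id and prompt are placeholders.

# Device-agnostic sketch (illustration only, not the Space's actual app.py).
import random

import torch
from diffusers import StableDiffusionPipeline

# Choose the device once and reuse it everywhere.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Placeholder model id for illustration.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
pipe = pipe.to(device)

if device == "cuda":
    # xformers memory-efficient attention only applies on GPU.
    pipe.enable_xformers_memory_efficient_attention()

# Seeded generator on the same device as the pipeline.
seed = random.randint(0, 2147483647)
generator = torch.Generator(device).manual_seed(seed)

image = pipe("a photo of an astronaut riding a horse", generator=generator).images[0]
image.save("out.png")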