Use fp32 for CPU diffusion pipeline.
Browse files
app.py
CHANGED
@@ -27,11 +27,14 @@ gpu = False
 
 AUTH_TOKEN = os.environ.get('AUTH_TOKEN')
 
-pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", custom_pipeline="stable_diffusion_mega", torch_dtype=torch.float16, revision="fp16", use_auth_token=AUTH_TOKEN)
 
 if gpu:
+    pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", custom_pipeline="stable_diffusion_mega", torch_dtype=torch.float16, revision="fp16", use_auth_token=AUTH_TOKEN)
     pipeline.to("cuda")
-
+else:
+    pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5",
+                                                 custom_pipeline="stable_diffusion_mega", torch_dtype=torch.float32,
+                                                 revision="fp32", use_auth_token=AUTH_TOKEN)
 
 # Huggingface Spaces have 16GB RAM and 8 CPU cores
 # See https://huggingface.co/docs/hub/spaces-overview#hardware-resources