Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -3,8 +3,18 @@ from diffusers import StableDiffusion3Pipeline
 import gradio as gr
 import os
 import spaces
+from huggingface_hub import snapshot_download
 
-HF_TOKEN = os.
+HF_TOKEN = os.getenv("HF_TOKEN")
+
+model_path = snapshot_download(
+    repo_id="stabilityai/stable-diffusion-3-medium",
+    revision="refs/pr/26",
+    repo_type="model",
+    ignore_patterns=["*.md", "*..gitattributes"],
+    local_dir="stable-diffusion-3-medium",
+    token=HF_TOKEN,
+)
 
 if torch.cuda.is_available():
     device = "cuda"
@@ -14,7 +24,7 @@ else:
     print("Using CPU")
 
 # Initialize the pipeline and download the model
-pipe = StableDiffusion3Pipeline.from_pretrained(
+pipe = StableDiffusion3Pipeline.from_pretrained(model_path, torch_dtype=torch.float16)
 pipe.to(device)
 
 # Define the image generation function