Update app.py
app.py CHANGED
@@ -1,34 +1,26 @@
 import gradio as gr
 import torch
-import os
 from diffusers import StableDiffusion3Pipeline
-
+import os
 
+# Retrieve Hugging Face API key from environment variables
 hf_api_key = os.getenv("PRODIGY_GA_02")
 if hf_api_key is None:
     raise ValueError("Hugging Face API key 'PRODIGY_GA_02' not found. Ensure it is set as a secret.")
 
-#
-
-
-# Example inference request
-response = inference(inputs="Your input text here")
-print(response)
-
-# Load model
-model_id = "stabilityai/stable-diffusion-3.5-medium"
-pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3.5-medium")
+# Load model with authentication token
+model_id = "stabilityai/stable-diffusion-3.5-large"
+pipe = StableDiffusion3Pipeline.from_pretrained(model_id, use_auth_token=hf_api_key)
 
-
+# Move model to the correct device
+device = "cuda" if torch.cuda.is_available() else "cpu"
+pipe.to(device)
 
-
-
-# Define Gradio interface
+# Define Gradio interface function
 def generate_image(prompt):
     images = pipe(prompt).images
     return images[0]
 
-
 # Create Gradio UI
 iface = gr.Interface(
     fn=generate_image,
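For context, a minimal sketch of the complete app.py as it should read after this commit. The diff cuts off at fn=generate_image, so the remaining gr.Interface arguments (inputs, outputs, title) and the launch() call are assumptions rather than part of the shown change; everything above them mirrors the new side of the diff. Recent diffusers releases deprecate use_auth_token in favor of token, so that swap may be needed depending on the pinned version.

import gradio as gr
import torch
from diffusers import StableDiffusion3Pipeline
import os

# Retrieve Hugging Face API key from environment variables
hf_api_key = os.getenv("PRODIGY_GA_02")
if hf_api_key is None:
    raise ValueError("Hugging Face API key 'PRODIGY_GA_02' not found. Ensure it is set as a secret.")

# Load model with authentication token (newer diffusers may expect token= instead)
model_id = "stabilityai/stable-diffusion-3.5-large"
pipe = StableDiffusion3Pipeline.from_pretrained(model_id, use_auth_token=hf_api_key)

# Move model to the correct device
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe.to(device)

# Define Gradio interface function
def generate_image(prompt):
    images = pipe(prompt).images
    return images[0]

# Create Gradio UI -- inputs/outputs/title are assumed; the diff ends at fn=generate_image,
iface = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(label="Prompt"),
    outputs=gr.Image(label="Generated image"),
    title="Stable Diffusion 3.5 Text-to-Image",
)

if __name__ == "__main__":
    iface.launch()

In a Space, the PRODIGY_GA_02 secret would be set under Settings -> Variables and secrets, which exposes it to os.getenv at runtime.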