Spaces:
Sleeping
Sleeping
Raj Singh
committed on
Commit
•
c61aacb
1
Parent(s):
8961a38
Update app.py
Browse files
app.py
CHANGED
@@ -3,8 +3,8 @@ import numpy as np
|
|
3 |
import random
|
4 |
import spaces
|
5 |
import torch
|
6 |
-
from diffusers import
|
7 |
-
from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
|
8 |
from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
|
9 |
import os # Import os module to access environment variables
|
10 |
|
@@ -14,7 +14,6 @@ hf_token = os.environ.get("HF_API_TOKEN")
|
|
14 |
dtype = torch.bfloat16
|
15 |
|
16 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
17 |
-
|
18 |
|
19 |
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
|
20 |
good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype, token=hf_token).to(device)
|
@@ -43,21 +42,31 @@ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidan
|
|
43 |
good_vae=good_vae,
|
44 |
):
|
45 |
yield img, seed
|
46 |
-
|
47 |
examples = [
|
48 |
"a tiny astronaut hatching from an egg on the moon",
|
49 |
"a cat holding a sign that says hello world",
|
50 |
"an anime illustration of a wiener schnitzel",
|
51 |
]
|
52 |
|
53 |
-
css="""
|
54 |
#col-container {
|
55 |
margin: 0 auto;
|
56 |
max-width: 520px;
|
57 |
}
|
58 |
"""
|
59 |
|
60 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
61 |
|
62 |
with gr.Column(elem_id="col-container"):
|
63 |
gr.Markdown(f"""# FLUX.1 [dev]
|
@@ -128,18 +137,18 @@ with gr.Blocks(css=css) as demo:
|
|
128 |
)
|
129 |
|
130 |
gr.Examples(
|
131 |
-
examples
|
132 |
-
fn
|
133 |
-
inputs
|
134 |
-
outputs
|
135 |
cache_examples="lazy"
|
136 |
)
|
137 |
|
138 |
gr.on(
|
139 |
triggers=[run_button.click, prompt.submit],
|
140 |
-
fn
|
141 |
-
inputs
|
142 |
-
outputs
|
143 |
)
|
144 |
|
145 |
-
demo.launch()
|
|
|
3 |
import random
|
4 |
import spaces
|
5 |
import torch
|
6 |
+
from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL
|
7 |
+
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast
|
8 |
from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
|
9 |
import os # Import os module to access environment variables
|
10 |
|
|
|
14 |
dtype = torch.bfloat16
|
15 |
|
16 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
|
|
17 |
|
18 |
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
|
19 |
good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype, token=hf_token).to(device)
|
|
|
42 |
good_vae=good_vae,
|
43 |
):
|
44 |
yield img, seed
|
45 |
+
|
46 |
examples = [
|
47 |
"a tiny astronaut hatching from an egg on the moon",
|
48 |
"a cat holding a sign that says hello world",
|
49 |
"an anime illustration of a wiener schnitzel",
|
50 |
]
|
51 |
|
52 |
+
css = """
|
53 |
#col-container {
|
54 |
margin: 0 auto;
|
55 |
max-width: 520px;
|
56 |
}
|
57 |
"""
|
58 |
|
59 |
+
# Define the dark theme
|
60 |
+
dark_theme = gr.themes.Default(
|
61 |
+
primary_hue="blue",
|
62 |
+
neutral_hue="gray",
|
63 |
+
spacing_size="md",
|
64 |
+
font="default",
|
65 |
+
# Set mode to 'dark' to enable dark mode
|
66 |
+
mode="dark"
|
67 |
+
)
|
68 |
+
|
69 |
+
with gr.Blocks(theme=dark_theme, css=css) as demo:
|
70 |
|
71 |
with gr.Column(elem_id="col-container"):
|
72 |
gr.Markdown(f"""# FLUX.1 [dev]
|
|
|
137 |
)
|
138 |
|
139 |
gr.Examples(
|
140 |
+
examples=examples,
|
141 |
+
fn=infer,
|
142 |
+
inputs=[prompt],
|
143 |
+
outputs=[result, seed],
|
144 |
cache_examples="lazy"
|
145 |
)
|
146 |
|
147 |
gr.on(
|
148 |
triggers=[run_button.click, prompt.submit],
|
149 |
+
fn=infer,
|
150 |
+
inputs=[prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
|
151 |
+
outputs=[result, seed]
|
152 |
)
|
153 |
|
154 |
+
demo.launch()
|