Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -16,7 +16,7 @@ import torch
 from diffusers import FluxPipeline
 
 torch.backends.cuda.matmul.allow_tf32 = True
-
+loaded_acc = None
 class timer:
     def __init__(self, method_name="timed process"):
         self.method = method_name
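The context lines above show only the first two lines of the timer helper; its body sits outside this diff, but it is used later as a context manager around inference (with ... timer("inference")). As an illustration only, not the Space's actual code, a minimal context manager of that shape could look like:

import time

class timer:
    # Illustrative sketch of a timing context manager; the Space's real
    # implementation is not shown in this diff.
    def __init__(self, method_name="timed process"):
        self.method = method_name

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        print(f"{self.method} took {time.time() - self.start:.2f}s")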
@@ -31,7 +31,7 @@ if not path.exists(cache_path):
     os.makedirs(cache_path, exist_ok=True)
 
 pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
-pipe.load_lora_weights(hf_hub_download("RED-AIGC/TDD", "FLUX.1-dev_tdd_lora_weights.safetensors"))
+pipe.load_lora_weights(hf_hub_download("RED-AIGC/TDD", "FLUX.1-dev_tdd_lora_weights.safetensors"),adapter_name="TDD")
 pipe.fuse_lora(lora_scale=0.125)
 pipe.to("cuda")
 
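Naming the LoRA when it is loaded (adapter_name="TDD") is what lets the new process_image below switch adapters by name with set_adapters. The dropdown added later also offers "TDD_adv", which this commit does not load; registering a second accelerator LoRA would follow the same pattern. A hedged sketch, where the "TDD_adv" weight filename is an assumption rather than something taken from this commit:

import torch
from diffusers import FluxPipeline
from huggingface_hub import hf_hub_download

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)

# Register each accelerator LoRA under its own adapter name so it can be
# selected by name at inference time.
pipe.load_lora_weights(
    hf_hub_download("RED-AIGC/TDD", "FLUX.1-dev_tdd_lora_weights.safetensors"),
    adapter_name="TDD",
)
# Hypothetical second adapter; the filename below is a placeholder, not from this commit.
# pipe.load_lora_weights(
#     hf_hub_download("RED-AIGC/TDD", "FLUX.1-dev_tdd_adv_lora_weights.safetensors"),
#     adapter_name="TDD_adv",
# )
pipe.set_adapters(["TDD"], adapter_weights=[1.0])  # choose the active adapter by name

Note that the committed code also calls pipe.fuse_lora(lora_scale=0.125) right after loading; how a fused adapter interacts with later set_adapters calls is worth verifying against the diffusers documentation.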
@@ -80,14 +80,37 @@ with gr.Blocks(css=css) as demo:
             with gr.Row():
                 steps = gr.Slider(label="Inference Steps", minimum=4, maximum=10, step=1, value=8)
                 scales = gr.Slider(label="Guidance Scale", minimum=0.0, maximum=3.5, step=0.1, value=2.0)
-
-
+            with gr.Row():
+                seed = gr.Number(label="Seed", value=3420, precision=0)
+            with gr.Row():
+                acc = gr.Dropdown(
+                    label="Accelerate Lora",
+                    choices=["TDD", "TDD_adv"],
+                    value="TDD",
+                )
 
             generate_btn = gr.Button("Generate Image", variant="primary", scale=1)
 
         with gr.Column(scale=4):
             output = gr.Image(label="Your Generated Image")
 
+            person="Portrait of a girl, high-end photography, prominent facial details, significant depth of field, soft lighting, late afternoon sun, in the style of Richard Avedon, Henri Cartier-Bresson, and Yousuf Karsh. Golden ratio composition, exquisite detail, Hasselblad H6D-100c, featured on the cover of photography magazines."
+            dog="Portrait photo of a Shiba Inu, photograph, highly detailed fur, warm and cheerful light, soft pastel tones, vibrant and sunny atmosphere, style by Tim Flach, bright daylight, natural setting, centered, extremely detailed, Nikon D850, award-winning photography"
+            # scenery="A high-detail shot of the sea and sky, capturing the vibrant blues and crisp sunlight. Shot with a Canon 5D Mark IV, award-winning photography."
+            scenery="Capture the golden hour's warm embrace on a sandy beach with a Nikon D850, highlighting the vibrant blues and the soft, golden sunlight. This award-winning photography captures the essence of a summer sunset by the coast."
+            gr.Examples(
+                examples=[
+                    [person, "TDD", 1024, 1024, 8, 2, 8888],
+                    [dog, "TDD", 1024, 1024, 6, 2, 29],
+                    [scenery, "TDD", 1024, 1024, 6, 2.2, 3777],
+                ],
+                # inputs=[prompt, negative_prompt, ckpt, acc, steps, guidance_scale, eta, seed],
+                inputs=[prompt,acc, height, width, steps, scales, seed],
+                outputs=output,
+                fn=process_image,
+                cache_examples="lazy",
+            )
+
     gr.Markdown(
         """
         <div style="max-width: 650px; margin: 2rem auto; padding: 1rem; border-radius: 10px; background-color: #f0f0f0;">
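Each row in examples is matched positionally to the components in inputs, so [person, "TDD", 1024, 1024, 8, 2, 8888] fills prompt, acc, height, width, steps, scales and seed in that order, and cache_examples="lazy" defers running fn until an example is first selected instead of precomputing every output at startup. A stripped-down sketch of the same wiring (the component definitions and the stand-in function below are illustrative, since prompt, height and width are defined outside the hunks shown):

import gradio as gr

def fake_generate(prompt, acc, height, width, steps, scales, seed):
    # Stand-in for process_image, only to show how an example row maps onto inputs.
    return f"{acc}: {prompt[:40]}... ({width}x{height}, {steps} steps, cfg {scales}, seed {seed})"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    acc = gr.Dropdown(label="Accelerate Lora", choices=["TDD", "TDD_adv"], value="TDD")
    height = gr.Slider(256, 1024, value=1024, step=64, label="Height")
    width = gr.Slider(256, 1024, value=1024, step=64, label="Width")
    steps = gr.Slider(4, 10, value=8, step=1, label="Inference Steps")
    scales = gr.Slider(0.0, 3.5, value=2.0, step=0.1, label="Guidance Scale")
    seed = gr.Number(label="Seed", value=3420, precision=0)
    output = gr.Textbox(label="Output")
    gr.Examples(
        examples=[["A portrait photo", "TDD", 1024, 1024, 8, 2, 8888]],
        inputs=[prompt, acc, height, width, steps, scales, seed],  # one column per component, in order
        outputs=output,
        fn=fake_generate,
        cache_examples="lazy",  # run fn only when an example is first clicked
    )

demo.launch()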
@@ -103,8 +126,14 @@ with gr.Blocks(css=css) as demo:
     )
 
     @spaces.GPU
-    def process_image(height, width, steps, scales,
+    def process_image(prompt,acc,height, width, steps, scales, seed):
         global pipe
+        global loaded_acc
+        if loaded_acc != acc:
+            #pipe.load_lora_weights(ACC_lora[acc], adapter_name=acc)
+            pipe.set_adapters([acc], adapter_weights=[1.0])
+            print(pipe.get_active_adapters())
+            loaded_acc = acc
         with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16), timer("inference"):
             return pipe(
                 prompt=[prompt],
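The return pipe( call at the bottom of this hunk is cut off by the hunk boundary, so its full argument list is not visible here. Based on the inputs the demo wires in, a typical FluxPipeline call would look roughly like the sketch below; the Space's actual arguments are an assumption:

import torch

def run_flux(pipe, prompt, height, width, steps, scales, seed):
    # Hedged sketch of a FLUX.1-dev call driven by the demo's inputs; the real
    # argument list in app.py continues past the hunk shown above.
    generator = torch.Generator("cuda").manual_seed(int(seed))
    result = pipe(
        prompt=[prompt],
        height=int(height),
        width=int(width),
        num_inference_steps=int(steps),
        guidance_scale=float(scales),
        generator=generator,
    )
    return result.images[0]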
@@ -118,7 +147,7 @@ with gr.Blocks(css=css) as demo:
 
     generate_btn.click(
         process_image,
-        inputs=[height, width, steps, scales,
+        inputs=[prompt, acc,height, width, steps, scales, seed],
         outputs=output
     )
 