Update app.py
app.py CHANGED
@@ -1,12 +1,263 @@
- … (seven lines of the original 12-line app.py were removed; apart from one removed line beginning "import", their contents are not recoverable from this view)
+import torch
+import numpy as np
 import gradio as gr
+import spaces
+import cv2
 
+from typing import Dict
+from torchvision.transforms.functional import to_tensor, center_crop, resize
+from PIL import Image
 
+from ui_model import fetch_model, process_sketch, process_mask
 
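+# Watermark assets: the logo's alpha channel becomes a 0-1 blend mask for compositing.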
+engage_logo = Image.open("engage_studios_logo.png").resize((700, 88), Image.Resampling.BICUBIC)
+engage_logo_mask = np.array(engage_logo.split()[-1])[..., None] / 255
+engage_logo_np = np.array(engage_logo.convert('RGB'))
 
+pipe = fetch_model()
+pipe.to('cuda')
+
+
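+# @spaces.GPU requests a ZeroGPU worker for the duration of each call on this Space.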
+@spaces.GPU
+def run_text_to_image(prompt=None, neg_prompt=None,
+                      inference_steps=8, num_images=2,
+                      guidance_scale=2.0,
+                      guidance_rescale=0.0,
+
+                      height=1024,
+                      width=1024,
+
+                      condition_scale=0.5,
+
+                      exposure=0.0,
+                      progress=gr.Progress()):
+    images = pipe(prompt=prompt,
+                  negative_prompt=neg_prompt,
+                  num_images_per_prompt=num_images,
+                  num_inference_steps=inference_steps,
+                  height=height,
+                  width=width,
+                  guidance_scale=guidance_scale,
+                  guidance_rescale=guidance_rescale,
+                  controlnet_conditioning_scale=condition_scale,
+                  gradio_progress=progress,
+                  cross_attention_kwargs={"scale": exposure}
+                  ).images
+    return images
+
+
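+# Gradio callback: appends quality tags to the prompt, seeds the RNGs, configures FreeU
+# and the style LoRAs, then delegates to run_text_to_image.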
+def run_model(user_state, condition_image, settings, prompt, neg_prompt, inference_steps=8, num_images=2,
+              guidance_scale=2.0,
+              guidance_rescale=0.0,
+              enable_freeu=False,
+
+              height=1024,
+              width=1024,
+
+              condition_scale=0.5,
+              sketch_detail=1.0,
+              sketch_softness=0.5,
+              inpaint_strength=0.9,
+
+              exposure=0.0,
+              enable_stylation=False,
+
+              style_1_down=0.0,
+              style_1_mid=0.0,
+              style_1_up=0.0,
+
+              style_2_down=0.0,
+              style_2_mid=0.0,
+              style_2_up=0.0,
+
+              style_3_down=0.0,
+              style_3_mid=0.0,
+              style_3_up=0.0,
+
+              style_4_down=0.0,
+              style_4_mid=0.0,
+              style_4_up=0.0,
+
+              seed=None,
+              progress=gr.Progress()):
+    # prompt += ", shot with a mirrorless, 35mm, photography, real, 8k, photorealistic, "
+    prompt += ", best quality, HD, ~*~aesthetic~*~"
+
+    # Guard: seed defaults to None, and torch.manual_seed(None) raises a TypeError.
+    if seed is not None:
+        np.random.seed(seed)
+        torch.manual_seed(seed)
+
+    progress(0, desc="Thinking...", total=int(inference_steps))
+
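+    # FreeU rescales UNet backbone/skip features at inference time; these factors are
+    # the values commonly recommended for SDXL.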
+    if enable_freeu:
+        pipe.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
+    else:
+        pipe.disable_freeu()
+
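+    # Exposure doubles as the global LoRA scale (cross_attention_kwargs above), so the
+    # style adapters only take effect when it is non-zero.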
+    if exposure != 0.0 and enable_stylation:
+        pipe.enable_lora()
+        adapter_weight_scales_ENGAGE = {"unet": {"down": style_1_down, "mid": style_1_mid, "up": style_1_up}}
+        adapter_weight_scales_FILM = {"unet": {"down": style_2_down, "mid": style_2_mid, "up": style_2_up}}
+        adapter_weight_scales_MJ = {"unet": {"down": style_3_down, "mid": style_3_mid, "up": style_3_up}}
+        adapter_weight_scales_MORE_ART = {"unet": {"down": style_4_down, "mid": style_4_mid, "up": style_4_up}}
+
+        pipe.set_adapters(["ENGAGE_LORA", "FILM_LORA", "MJ_LORA", "MORE_ART_LORA"],
+                          [adapter_weight_scales_ENGAGE,
+                           adapter_weight_scales_FILM,
+                           adapter_weight_scales_MJ,
+                           adapter_weight_scales_MORE_ART])
+    else:
+        pipe.set_adapters(["ENGAGE_LORA", "FILM_LORA", "MJ_LORA", "MORE_ART_LORA"],
+                          adapter_weights=[0.0, 0.0, 0.0, 0.0])
+        pipe.disable_lora()
+
+    images = run_text_to_image(prompt=prompt,
+                               neg_prompt=neg_prompt,
+                               num_images=num_images,
+                               inference_steps=inference_steps,
+                               height=height,
+                               width=width,
+                               guidance_scale=guidance_scale,
+                               guidance_rescale=guidance_rescale,
+                               condition_scale=condition_scale,
+                               progress=progress,
+                               exposure=exposure)
+
+    for idx, im in enumerate(images):
+        im = np.asarray(im).copy()
+        # Alpha-composite the watermark into the bottom-left 700x88 pixel corner
+        # (the logo RGB must be weighted by its mask, and clipping has to happen
+        # before the uint8 cast for it to do anything).
+        im[-88:, :700] = im[-88:, :700] * (1 - engage_logo_mask) + engage_logo_np * engage_logo_mask
+        images[idx] = Image.fromarray(np.clip(im, 0, 255).astype('uint8'))
+
+    # Append to the per-session gallery so earlier generations stay visible.
+    user_state["IMAGE_GALLERY"] += images
+    return user_state["IMAGE_GALLERY"], user_state
+
+
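+# Minimal neutral theme: square corners, dark primary buttons, light button text.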
+theme = gr.themes.Base(
+    primary_hue="neutral",
+    radius_size="none",
+).set(
+    body_text_color_dark='*neutral_800',
+    embed_radius='*radius_xxs',
+    button_primary_background_fill='*primary_700',
+    button_primary_background_fill_hover='*primary_400',
+    button_primary_background_fill_hover_dark='*primary_400',
+    button_primary_border_color_dark='*primary_200',
+    button_primary_text_color='*primary_50',
+    button_primary_text_color_dark='*primary_50',
+    button_primary_text_color_hover='*primary_50'
+)
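+# gr.State is per-session, so each visitor gets an independent gallery and selection.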
+with gr.Blocks(theme=theme) as engage_automotive_lora_demo:
+    session_state = gr.State(value={"IMAGE_GALLERY": [],
+                                    "SELECTED_IMAGE": None
+                                    })
+    diffused_image_out = gr.Gallery(label='Results', show_label=False,
+                                    columns=[3], rows=[1], object_fit="contain", height="auto",
+                                    format="png")
+    with gr.Group():
+        with gr.Row():
+            prompt_box = gr.Textbox("futuristic dark red car in a white studio",
+                                    label='Prompt')
+            generate_button = gr.Button("Generate", scale=0)
+        with gr.Row():
+            settings_dropdown = gr.Dropdown(
+                ["Text to image", "From sketch", "Inpaint", "Inpaint sketch"], value="Text to image",
+                label="Mode", info="Text to image: prompt only. "
+                                   "From sketch: upload an initial image / sketch in the image editor. "
+                                   "Inpaint sketch: edits the chosen area of an image, using the "
+                                   "initial image as the base for sketches."
+            )
+    with gr.Accordion("Image Editor", open=False):
+        condition_image = gr.ImageEditor(type='pil', show_label=False,
+                                         brush=gr.Brush(colors=["#000000"], color_mode="fixed"))
+    with gr.Row():
+        with gr.Accordion("Settings", open=False):
+            neg_prompt_box = gr.Textbox(
+                "blurry, poor quality, unrealistic",
+                label='Negative Prompt')
+            seed_box = gr.Number(42, label='Seed')
+            inference_steps = gr.Slider(0, 20, value=8,
+                                        label='Inference Steps', step=1)
+            num_images = gr.Slider(1, 3, value=2, label='Number of Images', step=1)
+            guidance_scale = gr.Slider(0, 10, value=1.5,
+                                       label='Guidance Scale', step=0.1)
+            guidance_rescale = gr.Slider(0.0, 1.0, value=0.0,
+                                         label='Guidance Rescale', step=0.1)
+            height = gr.Slider(128, 2048, value=1024, label='Image Height', step=64)
+            width = gr.Slider(128, 2048, value=1024, label='Image Width', step=64)
+            condition_influence = gr.Slider(0.0, 1.0, value=0.5, label='Condition Influence')
+            sketch_detail = gr.Slider(0.0, 1.0, value=0.5, label='Sketch Detail')
+            sketch_softness = gr.Slider(0.0, 1.0, value=0.5, label='Sketch Softness')
+            inpaint_strength = gr.Slider(0.0, 1.0, value=0.8, label='Inpaint Strength')
+            enable_freeu = gr.Checkbox(True, label='FreeU',
+                                       info='Enables FreeU scaling factors.')
+        with gr.Accordion("Stylation (Experimental)", open=False):
+            with gr.Row():
+                exposure = gr.Slider(-1.0, 1.0, value=0.0, label='Exposure')
+                enable_stylation = gr.Checkbox(label='Enable Stylation',
+                                               info='EXPERIMENTAL: We apologize for the ambiguity; '
+                                                    'please play around with the sliders to '
+                                                    'find a style you like! '
+                                                    'Warning: will slow down the generation time.')
+            with gr.Accordion("Style A - Engage Studios Futuristic", open=False):
+                style_A_down = gr.Slider(-1.0, 1.0, value=0.0, label='down')
+                style_A_mid = gr.Slider(-1.0, 1.0, value=0.0, label='mid')
+                style_A_up = gr.Slider(-1.0, 1.0, value=0.0, label='up')
+            with gr.Accordion("Style B - Lighting", open=False):
+                style_B_down = gr.Slider(-1.0, 1.0, value=0.0, label='down')
+                style_B_mid = gr.Slider(-1.0, 1.0, value=0.0, label='mid')
+                style_B_up = gr.Slider(-1.0, 1.0, value=0.0, label='up')
+            with gr.Accordion("Style C - Details A", open=False):
+                style_C_down = gr.Slider(-1.0, 1.0, value=0.0, label='down')
+                style_C_mid = gr.Slider(-1.0, 1.0, value=0.0, label='mid')
+                style_C_up = gr.Slider(-1.0, 1.0, value=0.0, label='up')
+            with gr.Accordion("Style D - Details B", open=False):
+                style_D_down = gr.Slider(-1.0, 1.0, value=0.0, label='down')
+                style_D_mid = gr.Slider(-1.0, 1.0, value=0.0, label='mid')
+                style_D_up = gr.Slider(-1.0, 1.0, value=0.0, label='up')
+
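+    # Inputs are passed positionally, so this list must match run_model's parameter order.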
+    generate_button.click(run_model,
+                          inputs=[session_state,
+                                  condition_image,
+                                  settings_dropdown,
+                                  prompt_box,
+                                  neg_prompt_box,
+
+                                  inference_steps,
+                                  num_images,
+
+                                  guidance_scale,
+                                  guidance_rescale,
+                                  enable_freeu,
+
+                                  height,
+                                  width,
+
+                                  condition_influence,
+
+                                  sketch_detail,
+                                  sketch_softness,
+                                  inpaint_strength,
+
+                                  exposure,
+                                  enable_stylation,
+
+                                  style_A_down,
+                                  style_A_mid,
+                                  style_A_up,
+
+                                  style_B_down,
+                                  style_B_mid,
+                                  style_B_up,
+
+                                  style_C_down,
+                                  style_C_mid,
+                                  style_C_up,
+
+                                  style_D_down,
+                                  style_D_mid,
+                                  style_D_up,
+
+                                  seed_box],
+                          outputs=[diffused_image_out, session_state],
+                          show_progress=True)
+
+engage_automotive_lora_demo.launch()