HeliosZhao committed
Commit: 0c3bec0
Parent(s): bd0ad83

update mask start step

Browse files:
- app.py +14 -0
- inference.py +2 -0
app.py CHANGED

@@ -188,6 +188,13 @@ with gr.Blocks(css='style.css') as demo:
                 maximum=100,
                 step=1,
                 value=50)
+
+            start_step = gr.Slider(label='Mask Starting Step',
+                minimum=0,
+                maximum=100,
+                step=1,
+                value=0)
+
             guidance_scale = gr.Slider(label='CFG Scale',
                 minimum=0,
                 maximum=50,
@@ -222,6 +229,7 @@ with gr.Blocks(css='style.css') as demo:
             'data/ikun/reference_images/zhongli.jpg',
             'man',
             0,
+            0,
             0.5,
             0.5,
             0,
@@ -239,6 +247,7 @@ with gr.Blocks(css='style.css') as demo:
             'data/huaqiang/reference_images/musk.jpg',
             'man',
             0,
+            0,
             0.5,
             0.5,
             0,
@@ -256,6 +265,7 @@ with gr.Blocks(css='style.css') as demo:
             'data/yanzi/reference_images/panda.jpeg',
             'panda',
             0,
+            0,
             0.5,
             0.5,
             0,
@@ -273,6 +283,7 @@ with gr.Blocks(css='style.css') as demo:
             'data/car-turn/reference_images/audi.jpeg',
             'car',
             0,
+            0,
             0.0,
             1.0,
             0,
@@ -290,6 +301,7 @@ with gr.Blocks(css='style.css') as demo:
             'data/car-turn/images/0000.jpg',
             'car',
             0,
+            0,
             0.0,
             1.0,
             1,
@@ -309,6 +321,7 @@ with gr.Blocks(css='style.css') as demo:
             ref_image,
             ref_pro_prompt,
             noise_level,
+            start_step,
             control_pose,
             control_depth,
             source_pro,
@@ -338,6 +351,7 @@ with gr.Blocks(css='style.css') as demo:
             ref_image,
             ref_pro_prompt,
             noise_level,
+            start_step,
             control_pose,
             control_depth,
             source_pro,
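Taken together, the app.py changes add a "Mask Starting Step" slider next to the existing sampling controls, prepend a default value of 0 to every example row, and pass the new start_step component to the inference callback. The snippet below is a minimal, self-contained sketch of that wiring pattern in Gradio; the run_inference callback and the Result textbox are hypothetical placeholders, not code from this Space.

# Minimal sketch of threading a "Mask Starting Step" slider from the UI into an
# inference callback, mirroring the pattern of this commit. `run_inference` and
# the Result textbox are hypothetical placeholders, not the Space's actual code.
import gradio as gr

def run_inference(noise_level, start_step, control_pose, control_depth):
    # Placeholder: a real pipeline would forward start_step to its sampler.
    return f"noise_level={noise_level}, start_step={start_step}"

with gr.Blocks() as demo:
    noise_level = gr.Slider(label='Noise Level', minimum=0, maximum=100, step=1, value=0)
    start_step = gr.Slider(label='Mask Starting Step', minimum=0, maximum=100, step=1, value=0)
    control_pose = gr.Slider(label='Pose Control', minimum=0.0, maximum=1.0, value=0.5)
    control_depth = gr.Slider(label='Depth Control', minimum=0.0, maximum=1.0, value=0.5)
    run_button = gr.Button('Run')
    result = gr.Textbox(label='Result')
    run_button.click(fn=run_inference,
                     inputs=[noise_level, start_step, control_pose, control_depth],
                     outputs=result)

# demo.launch()  # uncomment to serve the demo locally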
inference.py CHANGED

@@ -204,6 +204,7 @@ class InferencePipeline:
         ref_image: PIL.Image.Image,
         ref_pro_prompt: str,
         noise_level: int,
+        start_step: int,
         control_pose: float,
         control_depth: float,
         source_pro: int = 0, # 0 or 1
@@ -266,6 +267,7 @@ class InferencePipeline:
             masks=masks,
             mask_mode='all',
             mask_latent_fuse_mode = 'all',
+            start_step=start_step,
             ## edit bg and pro
             prior_latents=None,
             image_embeds=image_embed, # keep pro
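In inference.py, start_step is accepted as a new argument and forwarded to the underlying pipeline call next to the mask options, which suggests it sets the denoising step at which mask-based latent fusion begins. The sketch below illustrates that assumed behaviour only; it is not the repository's pipeline code, and denoise_step and blend_with_mask are hypothetical callables supplied by the caller.

# Schematic of the assumed semantics of `start_step`: mask-based latent fusion
# is skipped until the denoising loop reaches that step index. This is not the
# repository's pipeline; `denoise_step` and `blend_with_mask` are hypothetical.
def denoise_with_mask(latents, source_latents, masks, timesteps,
                      denoise_step, blend_with_mask, start_step=0):
    for i, t in enumerate(timesteps):
        latents = denoise_step(latents, t)   # one sampler update
        if i >= start_step:                  # fuse masked regions only from start_step on
            latents = blend_with_mask(latents, source_latents, masks)
    return latents

With the default of 0, which all the updated examples use, fusion would apply from the very first step, so the previous behaviour is presumably unchanged.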