LeoXing1996 committed on
Commit
558e11c
โ€ข
1 Parent(s): d4ba169

add quick run button

Browse files
Files changed (2) hide show
  1. README.md +1 -1
  2. app-huggingface.py +92 -7
README.md CHANGED
@@ -1,5 +1,5 @@
1
  ---
2
- title: Demo Space
3
  emoji: ๐Ÿค—
4
  colorFrom: yellow
5
  colorTo: pink
 
1
  ---
2
+ title: "PIA: Your Personalized Image Animator"
3
  emoji: ๐Ÿค—
4
  colorFrom: yellow
5
  colorTo: pink
app-huggingface.py CHANGED
@@ -1,4 +1,5 @@
1
  import json
 
2
  import os
3
  import os.path as osp
4
  import random
@@ -205,7 +206,7 @@ class AnimateController:
205
  def fetch_default_n_prompt(self, style: str):
206
  cfg = self.style_configs[style]
207
  n_prompt = cfg.get('n_prompt', '')
208
- ip_adapter_scale = cfg.get('real_ip_adapter_scale', 0)
209
 
210
  gr.Info('Set default negative prompt and ip_adapter_scale.')
211
  print('Set default negative prompt and ip_adapter_scale.')
@@ -223,9 +224,18 @@ class AnimateController:
223
  seed_textbox,
224
  ip_adapter_scale,
225
  style,
 
226
  progress=gr.Progress(),
227
  ):
228
 
 
 
 
 
 
 
 
 
229
  if seed_textbox != -1 and seed_textbox != "":
230
  torch.manual_seed(int(seed_textbox))
231
  else:
@@ -268,7 +278,7 @@ class AnimateController:
268
  f.write(json_str)
269
  f.write("\n\n")
270
 
271
- return save_sample_path
272
 
273
 
274
  controller = AnimateController()
@@ -290,10 +300,30 @@ def ui():
290
  )
291
 
292
  with gr.Row(equal_height=False):
 
 
 
 
 
 
 
 
 
 
 
 
 
293
  with gr.Column():
294
  with gr.Row():
295
  init_img = gr.Image(label='Input Image')
296
 
 
 
 
 
 
 
 
297
  style_dropdown = gr.Dropdown(label='Style', choices=list(
298
  STYLE_CONFIG_LIST.keys()), value=list(STYLE_CONFIG_LIST.keys())[0])
299
 
@@ -323,16 +353,15 @@ def ui():
323
  motion_scale_silder = gr.Slider(
324
  label='Motion Scale (Larger value means larger motion but less identity consistency)',
325
  value=1, step=1, minimum=1, maximum=len(RANGE_LIST))
326
- ip_adapter_scale = gr.Slider(
327
- label='IP-Apdater Scale', value=controller.fetch_default_n_prompt(
328
- list(STYLE_CONFIG_LIST.keys())[0])[1], minimum=0, maximum=1)
329
 
330
  with gr.Accordion('Advance Options', open=False):
331
  negative_prompt_textbox = gr.Textbox(
332
  value=controller.fetch_default_n_prompt(
333
  list(STYLE_CONFIG_LIST.keys())[0])[0],
334
  label="Negative prompt", lines=2)
335
-
 
 
336
  sample_step_slider = gr.Slider(
337
  label="Sampling steps", value=20, minimum=10, maximum=100, step=1)
338
 
@@ -374,8 +403,64 @@ def ui():
374
  ip_adapter_scale,
375
  style_dropdown,
376
  ],
377
- outputs=[result_video]
378
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
379
 
380
  def create_example(input_list):
381
  return gr.Examples(
 
1
  import json
2
+ import copy
3
  import os
4
  import os.path as osp
5
  import random
 
206
  def fetch_default_n_prompt(self, style: str):
207
  cfg = self.style_configs[style]
208
  n_prompt = cfg.get('n_prompt', '')
209
+ ip_adapter_scale = cfg.get('ip_adapter_scale', 0)
210
 
211
  gr.Info('Set default negative prompt and ip_adapter_scale.')
212
  print('Set default negative prompt and ip_adapter_scale.')
 
224
  seed_textbox,
225
  ip_adapter_scale,
226
  style,
227
+ example_img=None,
228
  progress=gr.Progress(),
229
  ):
230
 
231
+ if init_img is None:
232
+ if example_img is None:
233
+ gr.Warning('Please upload image or use example images.')
234
+ else:
235
+ gr.Info('Use exmaple image for fast try!')
236
+ init_img = example_img
237
+
238
+ init_img_bk = copy.deepcopy(init_img)
239
  if seed_textbox != -1 and seed_textbox != "":
240
  torch.manual_seed(int(seed_textbox))
241
  else:
 
278
  f.write(json_str)
279
  f.write("\n\n")
280
 
281
+ return init_img_bk, save_sample_path
282
 
283
 
284
  controller = AnimateController()
 
300
  )
301
 
302
  with gr.Row(equal_height=False):
303
+ # build state for default buttons
304
+ example_img = gr.State(value=np.array(Image.open('__assets__/image_animation/zhening/zhening.jpeg')))
305
+ default_motion = gr.State(value=1)
306
+ default_prompt1 = gr.State(value='lift a red envelope, Chinese new year')
307
+ default_prompt2 = gr.State(value='New Year\'s greetings, Chinese new year')
308
+ default_prompt3 = gr.State(value='Chinese costume, Chinese new year')
309
+ default_prompt4 = gr.State(value='sparklers, Chinese new year')
310
+ default_n_prompt = gr.State(value='wrong white balance, dark, sketches,worst quality,low quality, deformed, distorted, disfigured, bad eyes, wrong lips,weird mouth, bad teeth, mutated hands and fingers, bad anatomy,wrong anatomy, amputation, extra limb, missing limb, floating,limbs, disconnected limbs, mutation, ugly, disgusting, bad_pictures, negative_hand-neg')
311
+ default_seed = gr.State(10201304011203481448)
312
+ default_ip_adapter_scale = gr.State(0.2)
313
+ default_style = gr.State('3d_cartoon')
314
+ default_cfg = gr.State(7.5)
315
+
316
  with gr.Column():
317
  with gr.Row():
318
  init_img = gr.Image(label='Input Image')
319
 
320
+ gr.Markdown('## Fast Try!')
321
+ with gr.Row():
322
+ default_1 = gr.Button('๐Ÿงง', variant='primary')
323
+ default_2 = gr.Button('๐Ÿฎ', variant='primary')
324
+ default_3 = gr.Button('๐Ÿ‘จโ€๐Ÿ‘ฉโ€๐Ÿ‘งโ€๐Ÿ‘ฆ', variant='primary')
325
+ default_4 = gr.Button('๐Ÿงจ', variant='primary')
326
+
327
  style_dropdown = gr.Dropdown(label='Style', choices=list(
328
  STYLE_CONFIG_LIST.keys()), value=list(STYLE_CONFIG_LIST.keys())[0])
329
 
 
353
  motion_scale_silder = gr.Slider(
354
  label='Motion Scale (Larger value means larger motion but less identity consistency)',
355
  value=1, step=1, minimum=1, maximum=len(RANGE_LIST))
 
 
 
356
 
357
  with gr.Accordion('Advance Options', open=False):
358
  negative_prompt_textbox = gr.Textbox(
359
  value=controller.fetch_default_n_prompt(
360
  list(STYLE_CONFIG_LIST.keys())[0])[0],
361
  label="Negative prompt", lines=2)
362
+ ip_adapter_scale = gr.Slider(
363
+ label='IP-Apdater Scale', value=controller.fetch_default_n_prompt(
364
+ list(STYLE_CONFIG_LIST.keys())[0])[1], minimum=0, maximum=1)
365
  sample_step_slider = gr.Slider(
366
  label="Sampling steps", value=20, minimum=10, maximum=100, step=1)
367
 
 
403
  ip_adapter_scale,
404
  style_dropdown,
405
  ],
406
+ outputs=[init_img, result_video]
407
  )
408
+ default_1.click(
409
+ fn=controller.animate,
410
+ inputs=[
411
+ init_img,
412
+ default_motion,
413
+ default_prompt1,
414
+ default_n_prompt,
415
+ default_cfg ,
416
+ default_seed,
417
+ default_ip_adapter_scale,
418
+ default_style,
419
+ example_img,
420
+ ],
421
+ outputs=[init_img, result_video])
422
+ default_2.click(
423
+ fn=controller.animate,
424
+ inputs=[
425
+ init_img,
426
+ default_motion,
427
+ default_prompt2,
428
+ default_n_prompt,
429
+ default_cfg ,
430
+ default_seed,
431
+ default_ip_adapter_scale,
432
+ default_style,
433
+ example_img,
434
+ ],
435
+ outputs=[init_img, result_video])
436
+ default_3.click(
437
+ fn=controller.animate,
438
+ inputs=[
439
+ init_img,
440
+ default_motion,
441
+ default_prompt3,
442
+ default_n_prompt,
443
+ default_cfg ,
444
+ default_seed,
445
+ default_ip_adapter_scale,
446
+ default_style,
447
+ example_img,
448
+ ],
449
+ outputs=[init_img, result_video])
450
+ default_4.click(
451
+ fn=controller.animate,
452
+ inputs=[
453
+ init_img,
454
+ default_motion,
455
+ default_prompt4,
456
+ default_n_prompt,
457
+ default_cfg ,
458
+ default_seed,
459
+ default_ip_adapter_scale,
460
+ default_style,
461
+ example_img,
462
+ ],
463
+ outputs=[init_img, result_video])
464
 
465
  def create_example(input_list):
466
  return gr.Examples(