Commit a92726a (committed by jhj0517)
Parents: 0a8c924, 7ecc5a8

Merge pull request #2 from jhj0517/feature/automatic_output

Files changed (1): app.py (+21 -11)
app.py CHANGED
@@ -21,6 +21,12 @@ class App:
         if not args.disable_model_download_at_start:
             download_models(model_dir=args.model_dir)
 
+    @staticmethod
+    def on_step1_complete(input_img: str, input_pose_vid: str):
+
+        return [gr.Image(label="Input Image", value=input_img, type="filepath", scale=5),
+                gr.Video(label="Input Aligned Pose Video", value=input_pose_vid, scale=5)]
+
     def musepose_demo(self):
         with gr.Blocks() as demo:
             md_header = self.header()
@@ -28,11 +34,11 @@ class App:
             with gr.TabItem('Step1: Pose Alignment'):
                 with gr.Row():
                     with gr.Column(scale=3):
-                        img_input = gr.Image(label="Input Image here", type="filepath", scale=5)
+                        img_pose_input = gr.Image(label="Input Image", type="filepath", scale=5)
                         vid_dance_input = gr.Video(label="Input Dance Video", scale=5)
                     with gr.Column(scale=3):
-                        vid_dance_output = gr.Video(label="Aligned pose output will be displayed here", scale=5)
-                        vid_dance_output_demo = gr.Video(label="Output demo video will be displayed here", scale=5)
+                        vid_dance_output = gr.Video(label="Aligned Pose Output", scale=5)
+                        vid_dance_output_demo = gr.Video(label="Aligned Pose Output Demo", scale=5)
                     with gr.Column(scale=3):
                         with gr.Column():
                             nb_detect_resolution = gr.Number(label="Detect Resolution", value=512, precision=0)
@@ -47,25 +53,25 @@ class App:
                     [os.path.join("assets", "videos", "dance.mp4"), os.path.join("assets", "images", "ref.png"),
                      512, 720, 0, 300]]
                 ex_step1 = gr.Examples(examples=examples,
-                                       inputs=[vid_dance_input, img_input, nb_detect_resolution,
+                                       inputs=[vid_dance_input, img_pose_input, nb_detect_resolution,
                                                nb_image_resolution, nb_align_frame, nb_max_frame],
                                        outputs=[vid_dance_output, vid_dance_output_demo],
                                        fn=self.pose_alignment_infer.align_pose,
                                        cache_examples="lazy")
 
                 btn_align_pose.click(fn=self.pose_alignment_infer.align_pose,
-                                     inputs=[vid_dance_input, img_input, nb_detect_resolution, nb_image_resolution,
+                                     inputs=[vid_dance_input, img_pose_input, nb_detect_resolution, nb_image_resolution,
                                              nb_align_frame, nb_max_frame],
                                      outputs=[vid_dance_output, vid_dance_output_demo])
 
             with gr.TabItem('Step2: MusePose Inference'):
                 with gr.Row():
                     with gr.Column(scale=3):
-                        img_input = gr.Image(label="Input Image here", type="filepath", scale=5)
-                        vid_pose_input = gr.Video(label="Input Aligned Pose Video here", scale=5)
+                        img_musepose_input = gr.Image(label="Input Image", type="filepath", scale=5)
+                        vid_pose_input = gr.Video(label="Input Aligned Pose Video", scale=5)
                     with gr.Column(scale=3):
-                        vid_output = gr.Video(label="Output Video will be displayed here", scale=5)
-                        vid_output_demo = gr.Video(label="Output demo video will be displayed here", scale=5)
+                        vid_output = gr.Video(label="MusePose Output", scale=5)
+                        vid_output_demo = gr.Video(label="MusePose Output Demo", scale=5)
 
                     with gr.Column(scale=3):
                         with gr.Column():
@@ -93,7 +99,7 @@ class App:
                     [os.path.join("assets", "images", "ref.png"), os.path.join("assets", "videos", "pose.mp4"),
                      "fp16", 512, 512, 300, 48, 4, 3.5, 99, 20, -1, 1]]
                 ex_step2 = gr.Examples(examples=examples,
-                                       inputs=[img_input, vid_pose_input, weight_dtype, nb_width, nb_height,
+                                       inputs=[img_musepose_input, vid_pose_input, weight_dtype, nb_width, nb_height,
                                                nb_video_frame_length, nb_video_slice_frame_length,
                                                nb_video_slice_overlap_frame_number, nb_cfg, nb_seed, nb_steps,
                                                nb_fps, nb_skip],
@@ -102,11 +108,15 @@ class App:
                                        cache_examples="lazy")
 
                 btn_generate.click(fn=self.musepose_infer.infer_musepose,
-                                   inputs=[img_input, vid_pose_input, weight_dtype, nb_width, nb_height,
+                                   inputs=[img_musepose_input, vid_pose_input, weight_dtype, nb_width, nb_height,
                                            nb_video_frame_length, nb_video_slice_frame_length,
                                            nb_video_slice_overlap_frame_number, nb_cfg, nb_seed, nb_steps, nb_fps,
                                            nb_skip],
                                    outputs=[vid_output, vid_output_demo])
+            vid_dance_output.change(fn=self.on_step1_complete,
+                                    inputs=[img_pose_input, vid_dance_output],
+                                    outputs=[img_musepose_input, vid_pose_input])
+
         return demo
 
     @staticmethod
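
The change does two things: it splits the `img_input` name, previously reused by both tabs, into distinct `img_pose_input` and `img_musepose_input` components, and it wires a `vid_dance_output.change(...)` listener so that a finished Step 1 result automatically fills the Step 2 inputs. Below is a minimal, self-contained sketch of that auto-forwarding pattern; the function and component names (fake_align_pose, forward_to_step2, etc.) are illustrative stand-ins, not the ones in app.py.

import gradio as gr

# Stand-in for the real pose_alignment_infer.align_pose, which is not
# shown in this diff; it only needs to produce a value for the output
# video so the .change() event below fires.
def fake_align_pose(dance_video: str):
    return dance_video

def forward_to_step2(ref_image: str, aligned_video: str):
    # Mirrors on_step1_complete: return updated components for Step 2.
    return [gr.Image(value=ref_image, type="filepath"),
            gr.Video(value=aligned_video)]

with gr.Blocks() as demo:
    with gr.TabItem("Step1: Pose Alignment"):
        img_ref = gr.Image(label="Input Image", type="filepath")
        vid_dance = gr.Video(label="Input Dance Video")
        vid_aligned = gr.Video(label="Aligned Pose Output")
        btn_align = gr.Button("Align Pose")
        btn_align.click(fn=fake_align_pose,
                        inputs=[vid_dance],
                        outputs=[vid_aligned])

    with gr.TabItem("Step2: MusePose Inference"):
        img_step2 = gr.Image(label="Input Image", type="filepath")
        vid_step2 = gr.Video(label="Input Aligned Pose Video")

    # The commit's core idea: .change() fires both on user edits and on
    # programmatic updates, so when Step 1 writes a new aligned pose video,
    # it is pushed (with the reference image) into the Step 2 inputs.
    vid_aligned.change(fn=forward_to_step2,
                       inputs=[img_ref, vid_aligned],
                       outputs=[img_step2, vid_step2])

if __name__ == "__main__":
    demo.launch()

Returning component constructors with value=... from a handler is the Gradio 4 update idiom, which is what on_step1_complete does as well; on Gradio 3.x the equivalent would be gr.update(value=...).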