Pie31415 committed
Commit e2f5469
1 Parent(s): 1d2c610
app.py CHANGED
@@ -1,8 +1,6 @@
 import gradio as gr
 
 from text_to_animation.model import ControlAnimationModel
-from webui.app_pose import create_demo as create_demo_pose
-from webui.app_text_to_video import create_demo as create_demo_text_to_video
 from webui.app_control_animation import create_demo as create_demo_animation
 import argparse
 import os
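With the pose and text-to-video tabs dropped, app.py only has to wire up the control-animation demo. A minimal sketch of the resulting entry point, assuming (not shown in this hunk) that the `ControlAnimationModel()` constructor call and a bare `demo.launch()` are as simple as the remaining imports suggest:

```python
import gradio as gr

from text_to_animation.model import ControlAnimationModel
from webui.app_control_animation import create_demo as create_demo_animation

# Assumed wiring, not part of this hunk: build the model, hand it to the
# one remaining demo factory, and launch the resulting Blocks app.
model = ControlAnimationModel()
demo = create_demo_animation(model)
demo.launch()
```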
text_to_animation/model.py CHANGED
@@ -120,6 +120,7 @@ class ControlAnimationModel:
         prompt: str,
         video_path: str,
         n_prompt: str = "",
+        seed: int = 0,
         num_imgs: int = 4,
         resolution: int = 512,
         model_id: str = "runwayml/stable-diffusion-v1-5",
@@ -139,9 +140,8 @@ class ControlAnimationModel:
         )
         control = utils.pre_process_pose(video, apply_pose_detect=False)
 
-        seeds = [seed for seed in jax.random.randint(self.rng, [num_imgs], 0, 65536)]
-        prngs = [jax.random.PRNGKey(seed) for seed in seeds]
-        print(seeds)
+        # seeds = [seed for seed in jax.random.randint(self.rng, [num_imgs], 0, 65536)]
+        prngs = [jax.random.PRNGKey(seed)] * num_imgs
         images = self.pipe.generate_starting_frames(
             params=self.p_params,
             prngs=prngs,
@@ -152,9 +152,9 @@ class ControlAnimationModel:
 
         images = [np.array(images[i]) for i in range(images.shape[0])]
 
-        return images
+        return video, images
 
-    def generate_video_from_frame(self, controlnet_video, prompt, seed, neg_prompt=""):
+    def generate_video_from_frame(self, controlnet_video, prompt, n_prompt, seed):
         # generate a video using the seed provided
         prng_seed = jax.random.PRNGKey(seed)
         len_vid = controlnet_video.shape[0]
@@ -163,7 +163,7 @@ class ControlAnimationModel:
         prompts = added_prompt + ", " + prompt
 
         added_n_prompt = "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer difits, cropped, worst quality, low quality, deformed body, bloated, ugly"
-        negative_prompts = added_n_prompt + ", " + neg_prompt
+        negative_prompts = added_n_prompt + ", " + n_prompt
 
         # prompt_ids = self.pipe.prepare_text_inputs(["aardman style "+ prompt]*len_vid)
         # n_prompt_ids = self.pipe.prepare_text_inputs([neg_prompt]*len_vid)
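Note what the seeding rewrite does: the old code drew `num_imgs` random seeds from `self.rng`, while the new code replicates a single `PRNGKey` built from the user-supplied `seed`, so every candidate frame is sampled with identical randomness unless the pipeline varies something else per image. A short sketch of the two behaviors, using only standard `jax.random` calls:

```python
import jax

seed, num_imgs = 0, 4

# As committed: one key, replicated num_imgs times. Every entry is the
# same PRNG state, so each starting frame gets identical noise.
prngs = [jax.random.PRNGKey(seed)] * num_imgs

# A deterministic-but-varied alternative (not what this commit does):
# split the one key into num_imgs distinct subkeys.
prngs_varied = list(jax.random.split(jax.random.PRNGKey(seed), num_imgs))
```

The first form makes `generate_initial_frames` reproducible from the UI seed; the second would keep that reproducibility while still producing distinct candidates.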
webui/app_control_animation.py CHANGED
@@ -38,11 +38,11 @@ def create_demo(model: ControlAnimationModel):
         with gr.Row():
             with gr.Column():
                 # TODO: update so that model_link is customizable
-                model_link = gr.Dropdown(
-                    label="Model Link",
-                    choices=["runwayml/stable-diffusion-v1-5"],
-                    value="runwayml/stable-diffusion-v1-5",
-                )
+                # model_link = gr.Dropdown(
+                #     label="Model Link",
+                #     choices=["runwayml/stable-diffusion-v1-5"],
+                #     value="runwayml/stable-diffusion-v1-5",
+                # )
                 prompt = gr.Textbox(
                     placeholder="Prompt",
                     show_label=False,
@@ -145,7 +145,7 @@ def create_demo(model: ControlAnimationModel):
                         ("__assets__/dance4.gif", "Motion 8"),
                         ("__assets__/dance5.gif", "Motion 9"),
                     ],
-                ).style(columns=3)
+                ).style(grid=3, columns=3)
                 input_video_path = gr.Textbox(
                     label="Pose Sequence", visible=False, value="Motion 1"
                 )
@@ -155,7 +155,7 @@ def create_demo(model: ControlAnimationModel):
         with gr.Column(visible=True) as frame_selection_view:
             initial_frames = gr.Gallery(
                 label="Initial Frames", show_label=False
-            ).style(columns=4, rows=1, object_fit="contain", preview=True)
+            ).style(grid=4, columns=4, rows=1, object_fit="contain", preview=True)
 
             gr.Markdown("Select an initial frame to start your animation with.")
 
@@ -168,6 +168,7 @@ def create_demo(model: ControlAnimationModel):
             result = gr.Image(label="Generated Video")
 
         with gr.Box(visible=False):
+            controlnet_video = gr.Video(label="ControlNet Video")
             initial_frame_index = gr.Number(
                 label="Selected Initial Frame Index", value=-1, precision=0
             )
@@ -180,22 +181,25 @@ def create_demo(model: ControlAnimationModel):
         prompt,
         input_video_path,
         negative_prompt,
+        seed,
     ]
 
     animation_inputs = [
+        controlnet_video,
         prompt,
-        initial_frame_index,
-        input_video_path,
-        model_link,
-        motion_field_strength_x,
-        motion_field_strength_y,
-        t0,
-        t1,
+        # initial_frame_index,
+        # input_video_path,
+        # model_link,
+        # motion_field_strength_x,
+        # motion_field_strength_y,
+        # t0,
+        # t1,
+        # negative_prompt,
+        # chunk_size,
+        # video_length,
+        # merging_ratio,
         negative_prompt,
-        chunk_size,
-        video_length,
-        merging_ratio,
-        seed,
+        seed
     ]
 
     def submit_select(initial_frame_index: int):
@@ -213,7 +217,7 @@ def create_demo(model: ControlAnimationModel):
     gen_frames_button.click(
         fn=model.generate_initial_frames,
        inputs=frame_inputs,
-        outputs=initial_frames,
+        outputs=[controlnet_video, initial_frames],
    )
 
     gen_animation_button.click(
@@ -221,17 +225,17 @@ def create_demo(model: ControlAnimationModel):
         inputs=initial_frame_index,
         outputs=[frame_selection_view, animation_view],
     ).then(
-        fn=model.generate_animation,
+        fn=model.generate_video_from_frame,
         inputs=animation_inputs,
         outputs=result,
     )
 
-    # gr.Examples(examples=examples,
-    #             inputs=animation_inputs,
-    #             outputs=result,
-    #             fn=model.generate_animation,
-    #             cache_examples=on_huggingspace,
-    #             run_on_click=True,
-    #             )
+    # gr.Examples(examples=examples,
+    #             inputs=animation_inputs,
+    #             outputs=result,
+    #             fn=model.generate_animation,
+    #             cache_examples=on_huggingspace,
+    #             run_on_click=True,
+    #             )
 
     return demo
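The UI changes mirror the new model signatures: `generate_initial_frames` now returns `(video, images)`, so the frames button maps onto two output components, and the preprocessed pose video is parked in a hidden `gr.Video` that feeds `generate_video_from_frame(controlnet_video, prompt, n_prompt, seed)` back in positional order. A stripped-down sketch of that hidden-state round trip in Gradio 3.x style (handler bodies are placeholders, not this Space's real code):

```python
import gradio as gr

def generate_initial_frames(prompt, video_path, n_prompt, seed):
    # Placeholder: the real model returns the preprocessed pose video
    # plus the list of candidate starting frames.
    return video_path, []

def generate_video_from_frame(controlnet_video, prompt, n_prompt, seed):
    return None  # placeholder for the generated animation

with gr.Blocks() as demo:
    prompt = gr.Textbox(placeholder="Prompt", show_label=False)
    negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=False)
    seed = gr.Number(label="Seed", value=0, precision=0)
    input_video_path = gr.Textbox(visible=False, value="Motion 1")
    initial_frames = gr.Gallery(label="Initial Frames", show_label=False)
    result = gr.Image(label="Generated Video")

    with gr.Box(visible=False):  # hidden holder for the pose video
        controlnet_video = gr.Video(label="ControlNet Video")

    gen_frames_button = gr.Button("Generate Frames")
    gen_animation_button = gr.Button("Generate Animation")

    # Two return values map onto two output components, in order.
    gen_frames_button.click(
        fn=generate_initial_frames,
        inputs=[prompt, input_video_path, negative_prompt, seed],
        outputs=[controlnet_video, initial_frames],
    )

    # The hidden video comes back as the first positional input.
    gen_animation_button.click(
        fn=generate_video_from_frame,
        inputs=[controlnet_video, prompt, negative_prompt, seed],
        outputs=result,
    )

demo.launch()
```

Stashing an intermediate artifact in an invisible component is a common Gradio pattern for threading state between two click handlers without recomputing it.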