ruizhaocv committed on
Commit
54c67a0
1 Parent(s): 2424db5

Upload 2 files

demo/MotionDirector_gradio.py CHANGED

@@ -64,11 +64,11 @@ with gr.Blocks() as demo:
             "3-4: [Sports Concepts] -- Playing Golf",
             "3-5: [Sports Concepts] -- Skateboarding",
         ],
-        label="MotionDirector",
+        label="Select MotionDirector *",
         info="Which MotionDirector would you like to use!"
     )
 
-    text_pormpt = gr.Textbox(label="Text Prompt", value='', placeholder="Input your text prompt here!")
+    text_pormpt = gr.Textbox(label="Text Prompt *", value='', placeholder="Input your text prompt here!")
     neg_text_pormpt = gr.Textbox(label="Negative Text Prompt", value='', placeholder="default: None")
 
     submit = gr.Button("Generate")

@@ -80,18 +80,19 @@ with gr.Blocks() as demo:
         [generated_video, generated_video_baseline]
     )
 
+    gr.Markdown("Note: * denotes required field. Tips: More detailed text prompt is helpful for generating better results.")
     # Examples
     gr.Markdown("## Examples")
     gr.Examples(
         fn=motiondirector,
         examples=[
             ["1-1: [Cinematic Shots] -- Zoom Out", "A spaceman standing on the moon captured with a zoom out.",
-             8323920],
+             5894219],
             ["1-2: [Cinematic Shots] -- Zoom In", "A polar bear standing at the top of a snowy mountain captured with a zoom in.", 7938587],
             ["1-3: [Cinematic Shots] -- Dolly Zoom (Hitchcockian Zoom) 1", "A panda standing in front of an ancient Chinese temple captured with a dolly zoom.", 8238823],
             ["1-4: [Cinematic Shots] -- Dolly Zoom (Hitchcockian Zoom) 2", "A lion sitting on top of a cliff captured with a dolly zoom.", 1675932],
             ["1-5: [Cinematic Shots] -- Follow", "A fireman is walking through fire captured with a follow cinematic shot.", 2927089],
-            ["1-6: [Cinematic Shots] -- Reverse Follow", "A fireman is walking through fire captured with a reverse follow cinematic shot.", 9759630],
+            ["1-6: [Cinematic Shots] -- Reverse Follow", "A fireman is walking through fire captured with a reverse follow cinematic shot.", 271723],
             ["1-7: [Cinematic Shots] -- Chest Transition", "An ancient Roman soldier walks through the crowd on the street captured with a chest transition cinematic shot.", 3982271],
             ["1-8: [Cinematic Shots] -- Mini Jib Reveal",
              "A British Redcoat soldier is walking through the mountains captured with a mini jib reveal cinematic shot.",
demo/motiondirector.py CHANGED

@@ -96,6 +96,7 @@ def prepare_input_latents(
     else:
         random_seed = random.randint(100, 10000000)
     torch.manual_seed(random_seed)
+    print(f"random_seed: {random_seed}")
     if '1-' in model_select:
         noise_prior = 0.3
     elif '2-' in model_select:

@@ -122,7 +123,7 @@ def prepare_input_latents(
     latents = torch.randn(shape, dtype=torch.half)
     latents_base = latents
 
-    return latents, latents_base
+    return latents, latents_base, random_seed
 
 
 class MotionDirector():

@@ -157,7 +158,7 @@ class MotionDirector():
         with torch.autocast(device, dtype=torch.half):
             # prepare input latents
             with torch.no_grad():
-                init_latents,init_latents_base = prepare_input_latents(
+                init_latents, init_latents_base, random_seed = prepare_input_latents(
                     pipe=self.pipe,
                     batch_size=1,
                     num_frames=16,
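The change to this file threads the resolved seed back out of prepare_input_latents, so the caller can report which seed was actually used when one was drawn at random. A minimal sketch of that pattern under a deliberately reduced, hypothetical signature (the real function also takes the pipe, model selection, frame count, and so on; only the seed handling mirrors the diff):

```python
import random
import torch

def prepare_input_latents(shape, random_seed=None):
    # Hypothetical reduced signature; the condition for "no seed given" is an
    # assumption, since the matching `if` branch is outside the diff context.
    if not random_seed:
        random_seed = random.randint(100, 10000000)
    torch.manual_seed(random_seed)
    print(f"random_seed: {random_seed}")

    # The real code samples half-precision latents shaped for the video pipeline.
    latents = torch.randn(shape)
    latents_base = latents

    # Returning the seed alongside the latents is the key change: the caller
    # can now log or display the seed that produced the result.
    return latents, latents_base, random_seed

# Caller side, mirroring the updated three-value unpacking in the diff above.
init_latents, init_latents_base, random_seed = prepare_input_latents((1, 4, 16, 32, 32))
print(init_latents.shape, random_seed)
```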