Fabrice-TIERCELIN committed on
Commit 5d1f182 · verified · 1 Parent(s): 314d36b

fn=generate_video,

Files changed (1):
  app.py  +29 -2
app.py CHANGED
@@ -45,8 +45,31 @@ def initialize_model(model_path):
     print('Model initialized: ' + model_path)
     return hunyuan_video_sampler
 
-@spaces.GPU(duration=120)
 def generate_video(
+    prompt,
+    resolution,
+    video_length,
+    seed,
+    num_inference_steps,
+    guidance_scale,
+    flow_shift,
+    embedded_guidance_scale
+):
+    print('generate_video (prompt: ' + prompt + ')')
+    return generate_video_gpu(
+        model,
+        prompt,
+        resolution,
+        video_length,
+        seed,
+        num_inference_steps,
+        guidance_scale,
+        flow_shift,
+        embedded_guidance_scale
+    )
+
+@spaces.GPU(duration=120)
+def generate_video_gpu(
     model,
     prompt,
     resolution,
@@ -57,6 +80,7 @@ def generate_video(
     flow_shift,
     embedded_guidance_scale
 ):
+    print('generate_video_gpu (prompt: ' + prompt + ')')
     if torch.cuda.device_count() == 0:
         gr.Warning('Set this space to GPU config to make it work.')
         return None
@@ -65,6 +89,7 @@ def generate_video(
     width, height = resolution.split("x")
     width, height = int(width), int(height)
     negative_prompt = "" # not applicable in the inference
+    print('Predicting video...')
 
     outputs = model.predict(
         prompt=prompt,
@@ -81,6 +106,7 @@ def generate_video(
         embedded_guidance_scale=embedded_guidance_scale
     )
 
+    print('Video predicted')
     samples = outputs['samples']
     sample = samples[0].unsqueeze(0)
 
@@ -92,6 +118,7 @@ def generate_video(
     save_videos_grid(sample, video_path, fps=24)
     logger.info(f'Sample saved to: {video_path}')
 
+    print('Return the video')
     return video_path
 
 def create_demo(model_path):
@@ -158,7 +185,7 @@ If you can't use _Hunyuan Video_, you can use _[CogVideoX](https://huggingface.c
     """)
 
     generate_btn.click(
-        fn=lambda *inputs: generate_video(model, *inputs),
+        fn=generate_video,
         inputs=[
             prompt,
             resolution,
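
In short: generate_video keeps the signature the Gradio click handler expects but loses the @spaces.GPU decorator; it now forwards its arguments, plus the module-level model, to a new decorated worker generate_video_gpu, and generate_btn.click is wired to the plain named function instead of a lambda closing over model. Below is a minimal sketch of that pattern in isolation, assuming Hugging Face's spaces package (available in the Spaces runtime) and a placeholder model; it illustrates the commit's structure, not the Space's actual code.

import gradio as gr
import spaces

model = object()  # placeholder for the sampler that initialize_model() returns

def generate(prompt):
    # Undecorated, named wrapper: this is what the click handler points at.
    # `model` comes from module scope instead of arriving as a Gradio input.
    return generate_gpu(model, prompt)

@spaces.GPU(duration=120)  # ZeroGPU attaches a GPU only while this runs
def generate_gpu(model, prompt):
    # Real inference would run here and return a video file path.
    return None

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    video = gr.Video(label="Generated video")
    gr.Button("Generate").click(fn=generate, inputs=[prompt], outputs=[video])

demo.launch()

The commit message gives no rationale, but a plausible reading is that handing Gradio a plain, named handler that calls the @spaces.GPU worker keeps the handler inspectable by the ZeroGPU runtime, while the added print calls trace each stage in the Space's container logs.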