AnthonyOlatunji committed on
Commit
59e171a
·
verified ·
1 Parent(s): f4d805d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -23
app.py CHANGED
@@ -1,40 +1,48 @@
1
  # app.py
2
- import gradio as gr
3
- import subprocess
4
- import shlex
5
  import os
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
  def generate_video(prompt: str) -> str:
8
  """
9
- Replace the inside of this function with your real video-gen call.
10
- It must return a path to an .mp4 file.
11
  """
12
  out_path = "/tmp/out.mp4"
13
 
14
- # === placeholder: ffmpeg text-on-black demo ===
15
- # (so you can verify the UI/video pipeline is wired up)
16
- # Draw your prompt centered in white on black for 3 seconds.
17
- cmd = f"""
18
- ffmpeg -y \
19
- -f lavfi -i color=c=black:s=640x360:d=3 \
20
- -vf "drawtext=fontfile=/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf:
21
- text='{prompt}':
22
- fontcolor=white:fontsize=48:
23
- x=(w-text_w)/2:y=(h-text_h)/2" \
24
- -codec:a copy {out_path}
25
- """
26
- # run it
27
- subprocess.run(shlex.split(cmd), check=True)
28
  return out_path
29
 
30
- # === Gradio UI ===
 
31
  with gr.Blocks() as demo:
32
- gr.Markdown("# 🎬 Video Generator")
33
- prompt = gr.Textbox(label="Enter your prompt here")
34
  video_out = gr.Video(label="Generated Video")
35
  btn = gr.Button("Generate Video")
36
 
37
  btn.click(fn=generate_video, inputs=prompt, outputs=video_out)
38
 
39
- # Launch with a public link on Spaces
40
  demo.launch(share=True)
 
1
  # app.py
 
 
 
2
  import os
3
+ import gradio as gr
4
+ from huggingface_hub import InferenceClient
5
+
6
+ # ── CONFIG ─────────────────────────────────────────────────────────────────────
7
+
8
+ # (1) Your HF token must be set in Secrets as HF_HUB_TOKEN
9
+ HF_TOKEN = os.getenv("HF_HUB_TOKEN", None)
10
+ if not HF_TOKEN:
11
+ raise RuntimeError("Please set a HF_HUB_TOKEN in your Space secrets!")
12
+
13
+ # (2) Model to call
14
+ MODEL_ID = "damo-vilab/text-to-video-ms-1.7b"
15
+
16
+ # Initialize the HF Inference client
17
+ client = InferenceClient(token=HF_TOKEN)
18
+
19
+ # ── GENERATION FUNCTION ────────────────────────────────────────────────────────
20
 
21
  def generate_video(prompt: str) -> str:
22
  """
23
+ Calls HF's text-to-video API, writes out an MP4, and returns its path.
 
24
  """
25
  out_path = "/tmp/out.mp4"
26
 
27
+ # 1) Fire off the inference request
28
+ # The `text_to_video` method returns raw bytes for the first generated video.
29
+ result = client.text_to_video(model=MODEL_ID, inputs=prompt)
30
+
31
+ # 2) Save to disk
32
+ video_bytes = result["generated_video"] # bytes
33
+ with open(out_path, "wb") as f:
34
+ f.write(video_bytes)
35
+
 
 
 
 
 
36
  return out_path
37
 
38
+ # ── GRADIO UI ─────────────────────────────────────────────────────────────────
39
+
40
  with gr.Blocks() as demo:
41
+ gr.Markdown("## 🎬 Text-to-Video Generator")
42
+ prompt = gr.Textbox(label="Enter your prompt here", placeholder="a chicken crosses the road")
43
  video_out = gr.Video(label="Generated Video")
44
  btn = gr.Button("Generate Video")
45
 
46
  btn.click(fn=generate_video, inputs=prompt, outputs=video_out)
47
 
 
48
  demo.launch(share=True)