Commit e3dcab5
Parent(s): a54c820
Upload demo.py
demo.py
ADDED
@@ -0,0 +1,104 @@
from huggingface_hub import InferenceClient
import base64
import json  # needed to decode the raw bytes returned by InferenceClient.post()
import os
from pathlib import Path
import time

def save_video(base64_video: str, output_path: str):
    """Save base64 encoded video to a file"""
    video_bytes = base64.b64decode(base64_video)
    with open(output_path, "wb") as f:
        f.write(video_bytes)

def generate_video(
    prompt: str,
    endpoint_url: str,
    token: str = None,
    resolution: str = "1280x720",
    video_length: int = 129,
    num_inference_steps: int = 50,
    seed: int = -1,
    guidance_scale: float = 1.0,
    flow_shift: float = 7.0,
    embedded_guidance_scale: float = 6.0
) -> str:
    """Generate a video using the custom inference endpoint.

    Args:
        prompt: Text prompt describing the video
        endpoint_url: Full URL to the inference endpoint
        token: HuggingFace API token for authentication
        resolution: Video resolution (default: "1280x720")
        video_length: Number of frames (default: 129 for 5s)
        num_inference_steps: Number of inference steps (default: 50)
        seed: Random seed, -1 for random (default: -1)
        guidance_scale: Guidance scale value (default: 1.0)
        flow_shift: Flow shift value (default: 7.0)
        embedded_guidance_scale: Embedded guidance scale (default: 6.0)

    Returns:
        Path to the saved video file
    """
    # Initialize client
    client = InferenceClient(model=endpoint_url, token=token)

    # Prepare payload
    payload = {
        "inputs": prompt,
        "resolution": resolution,
        "video_length": video_length,
        "num_inference_steps": num_inference_steps,
        "seed": seed,
        "guidance_scale": guidance_scale,
        "flow_shift": flow_shift,
        "embedded_guidance_scale": embedded_guidance_scale
    }

    # Make request; InferenceClient.post() returns the raw response body as bytes
    response = client.post(json=payload)
    result = json.loads(response)

    # Save video
    timestamp = int(time.time())
    output_path = f"generated_video_{timestamp}.mp4"
    save_video(result["video_base64"], output_path)

    print(f"Video generated with seed {result['seed']}")
    return output_path

if __name__ == "__main__":

    hf_api_token = os.environ.get('HF_API_TOKEN', '')
    endpoint_url = os.environ.get('ENDPOINT_URL', '')

    video_path = generate_video(
        endpoint_url=endpoint_url,
        token=hf_api_token,

        prompt="A cat walks on the grass, realistic style.",

        # min resolution is 64x64, max is 4096x4096 (in 16px increments);
        # however, the model is designed for 1280x720
        resolution="1280x720",

        # number of frames plus one (max 1024?)
        # increments of 4 frames
        video_length=49,  # 129

        # number of denoising/sampling steps (default: 30)
        num_inference_steps=15,  # 50

        seed=-1,  # -1 to keep it random

        # not sure why we have two guidance scales
        guidance_scale=1.0,  # 3

        # strength of prompt guidance (default: 6.0)
        embedded_guidance_scale=6.0,

        # flow shift (larger values result in shorter videos, default: 9.0, max: 30)
        flow_shift=9.0,
    )
    print(f"Video saved to: {video_path}")
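
For reference, the same request can be issued without huggingface_hub. The sketch below is a hypothetical equivalent using requests, assuming the endpoint accepts the JSON payload built in demo.py and returns a JSON body containing video_base64 and seed (field names taken from the script); the URL and token shown are placeholders, and the payload values mirror the example in the __main__ block.

import base64
import requests

ENDPOINT_URL = "https://<your-endpoint>.endpoints.huggingface.cloud"  # placeholder
HF_API_TOKEN = "hf_..."  # placeholder

# Same payload fields as demo.py's generate_video()
payload = {
    "inputs": "A cat walks on the grass, realistic style.",
    "resolution": "1280x720",
    "video_length": 49,
    "num_inference_steps": 15,
    "seed": -1,
    "guidance_scale": 1.0,
    "flow_shift": 9.0,
    "embedded_guidance_scale": 6.0,
}

# Video generation can take several minutes, so use a generous timeout
resp = requests.post(
    ENDPOINT_URL,
    headers={"Authorization": f"Bearer {HF_API_TOKEN}"},
    json=payload,
    timeout=600,
)
resp.raise_for_status()
result = resp.json()

# Decode the base64-encoded MP4 assumed to be returned by the handler
with open("generated_video.mp4", "wb") as f:
    f.write(base64.b64decode(result["video_base64"]))
print(f"Seed used: {result['seed']}")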