Update app.py
app.py CHANGED
@@ -97,7 +97,47 @@ def generate_video(input_image, prompt, height, width,
                     guidance_scale = 1, steps = 4,
                     seed = 42, randomize_seed = False,
                     progress=gr.Progress(track_tqdm=True)):
+    """
+    Generate a video from an input image using the Wan 2.1 I2V model with CausVid LoRA.
 
+    This function takes an input image and generates a video animation based on the provided
+    prompt and parameters. It uses the Wan 2.1 14B Image-to-Video model with CausVid LoRA
+    for fast generation in 4-8 steps.
+
+    Args:
+        input_image (PIL.Image): The input image to animate. Will be resized to target dimensions.
+        prompt (str): Text prompt describing the desired animation or motion.
+        height (int): Target height for the output video. Will be adjusted to a multiple of MOD_VALUE (32).
+        width (int): Target width for the output video. Will be adjusted to a multiple of MOD_VALUE (32).
+        negative_prompt (str, optional): Negative prompt to avoid unwanted elements.
+            Defaults to default_negative_prompt (contains unwanted visual artifacts).
+        duration_seconds (float, optional): Duration of the generated video in seconds.
+            Defaults to 2. Clamped between MIN_FRAMES_MODEL/FIXED_FPS and MAX_FRAMES_MODEL/FIXED_FPS.
+        guidance_scale (float, optional): Controls adherence to the prompt. Higher values = more adherence.
+            Defaults to 1.0. Range: 0.0-20.0.
+        steps (int, optional): Number of inference steps. More steps = higher quality but slower.
+            Defaults to 4. Range: 1-30.
+        seed (int, optional): Random seed for reproducible results. Defaults to 42.
+            Range: 0 to MAX_SEED (2147483647).
+        randomize_seed (bool, optional): Whether to use a random seed instead of the provided seed.
+            Defaults to False.
+        progress (gr.Progress, optional): Gradio progress tracker. Defaults to gr.Progress(track_tqdm=True).
+
+    Returns:
+        tuple: A tuple containing:
+            - video_path (str): Path to the generated video file (.mp4)
+            - current_seed (int): The seed used for generation (useful when randomize_seed=True)
+
+    Raises:
+        gr.Error: If input_image is None (no image uploaded).
+
+    Note:
+        - The function automatically resizes the input image to the target dimensions
+        - Frame count is calculated as duration_seconds * FIXED_FPS (24)
+        - Output dimensions are adjusted to be multiples of MOD_VALUE (32)
+        - The function uses GPU acceleration via the @spaces.GPU decorator
+        - Generation time varies based on steps and duration (see get_duration function)
+    """
     if input_image is None:
         raise gr.Error("Please upload an input image.")
 
@@ -174,4 +214,4 @@ with gr.Blocks() as demo:
     )
 
 if __name__ == "__main__":
-    demo.queue().launch()
+    demo.queue().launch(mcp_server=True)