adapters added

app.py CHANGED
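In short: the commit loads two AnimateDiff motion LoRAs ("zoom-out" and "pan-left") at startup, threads a new adapter_choice argument through the handler, activates the chosen adapter with pipe.set_adapters() before inference, and reworks the frame-count slider while adding an adapter dropdown to the Gradio inputs.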
@@ -10,6 +10,12 @@ device = "cuda"
 adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
 model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
 pipe = AnimateDiffPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16).to(device)
+pipe.load_lora_weights(
+    "diffusers/animatediff-motion-lora-zoom-out", adapter_name="zoom-out",
+)
+pipe.load_lora_weights(
+    "diffusers/animatediff-motion-lora-pan-left", adapter_name="pan-left",
+)
 scheduler = DDIMScheduler.from_pretrained(
     model_id,
     subfolder="scheduler",
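Each load_lora_weights() call registers a motion LoRA on the pipeline under its adapter_name, so it can later be activated by name. Not something this commit does, but worth noting: diffusers can activate several motion LoRAs at once and blend them. A minimal sketch, assuming the two adapters loaded above; the 0.8 weight is an illustrative value:

    # Sketch (not in this commit): blend both motion LoRAs at once.
    # The adapter_weights values here are illustrative.
    pipe.set_adapters(["zoom-out", "pan-left"], adapter_weights=[1.0, 0.8])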
@@ -20,8 +26,13 @@ scheduler = DDIMScheduler.from_pretrained(
 )
 pipe.scheduler = scheduler
 @spaces.GPU
-def generate_video(prompt, guidance_scale, num_inference_steps,num_frames):
+def generate_video(prompt, guidance_scale, num_inference_steps, num_frames, adapter_choice):
     pipe.to(device)
+
+    # Set adapters based on user selection
+    if adapter_choice:
+        pipe.set_adapters([adapter_choice], adapter_weights=[1.0])
+
     output = pipe(
         prompt=prompt,
         negative_prompt="bad quality, worse quality",
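Since pipe is a module-level object shared across requests, the LoRA selected for one request stays active for the next if adapter_choice ever comes back empty. A defensive variant of the new branch, as a sketch; the else branch is an assumption, not part of the commit:

    # Sketch: fall back to plain motion-adapter inference when nothing is selected.
    if adapter_choice:
        pipe.set_adapters([adapter_choice], adapter_weights=[1.0])
    else:
        pipe.disable_lora()  # assumption: deactivate any previously chosen LoRA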
@@ -29,19 +40,22 @@ def generate_video(prompt, guidance_scale, num_inference_steps,num_frames):
         guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
     )
-
     name = str(uuid.uuid4()).replace("-", "")
     path = f"/tmp/{name}.mp4"
     export_to_video(output.frames[0], path, fps=10)
     return path

+# Available adapters (replace with your actual adapter names)
+adapter_options = ["zoom-out", "pan-left"]
+
 iface = gr.Interface(
     fn=generate_video,
     inputs=[
         gr.Textbox(label="Enter your prompt"),
         gr.Slider(minimum=0.5, maximum=10, value=7.5, label="Guidance Scale"),
         gr.Slider(minimum=4, maximum=24, step=4, value=4, label="Inference Steps"),
-        gr.Slider(minimum=16, maximum=64, step
+        gr.Slider(minimum=16, maximum=64, step=1, value=16, label="Frames"),
+        gr.Dropdown(choices=adapter_options, label="Select Adapter")  # Add dropdown for adapter selection
     ],
     outputs=gr.Video(label="Generated Video"),
 )
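For a quick smoke test outside the UI, the handler can be called directly with the same kinds of values the widgets supply. The prompt and numbers below are illustrative, not values from the commit:

    # Illustrative direct call to the new handler signature.
    video_path = generate_video(
        prompt="a rocket launching at dawn, photorealistic",
        guidance_scale=7.5,
        num_inference_steps=8,
        num_frames=16,
        adapter_choice="zoom-out",
    )
    print(video_path)  # e.g. /tmp/<32-hex-chars>.mp4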
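The visible hunks end at the gr.Interface definition; actually serving the app still requires the usual Gradio launch call, presumably further down the unchanged part of app.py:

    iface.launch()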