Spaces: Running on Zero
import gradio as gr
import torch
from diffusers import I2VGenXLPipeline
from diffusers.utils import export_to_gif, load_image
import spaces

# Initialize the pipeline
pipeline = I2VGenXLPipeline.from_pretrained("ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16")
pipeline.enable_model_cpu_offload()

# Request a ZeroGPU device only while this function runs
@spaces.GPU
def generate_gif(image, prompt, negative_prompt, num_inference_steps, guidance_scale, seed):
    # Load the image
    image = load_image(image).convert("RGB")

    # Set the generator seed
    generator = torch.manual_seed(seed)

    # Generate the frames
    frames = pipeline(
        prompt=prompt,
        image=image,
        num_inference_steps=num_inference_steps,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        generator=generator
    ).frames[0]

    # Export to GIF
    gif_path = "i2v.gif"
    export_to_gif(frames, gif_path)

    return gif_path

# Create the Gradio interface
iface = gr.Interface(
    fn=generate_gif,
    inputs=[
        gr.Image(type="filepath", label="Input Image"),
        gr.Textbox(lines=2, placeholder="Enter your prompt here...", label="Prompt"),
        gr.Textbox(lines=2, placeholder="Enter your negative prompt here...", label="Negative Prompt"),
        gr.Slider(1, 100, step=1, value=50, label="Number of Inference Steps"),
        gr.Slider(1, 20, step=0.1, value=9.0, label="Guidance Scale"),
        gr.Number(label="Seed", value=8888)
    ],
    outputs=gr.File(label="Generated GIF"),
    title="I2VGen-XL GIF Generator",
    description="Generate a GIF from an image and a prompt using the I2VGen-XL model."
)

# Launch the interface
iface.launch()
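
With 50 inference steps, a single generation can run longer than ZeroGPU's default time slice. As a minimal sketch, assuming the `duration` argument of `spaces.GPU` (seconds of GPU time granted per call, as described in the ZeroGPU documentation), the decorator can request a longer allocation for the same function:

# Sketch: ask ZeroGPU for a longer per-call allocation.
# Assumes spaces.GPU accepts a duration argument in seconds.
@spaces.GPU(duration=120)
def generate_gif(image, prompt, negative_prompt, num_inference_steps, guidance_scale, seed):
    ...  # body unchanged from the example above

Keeping the requested duration as small as the workload allows generally shortens queue times for visitors of the Space.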