Spaces: Running on Zero
import gradio as gr
import torch
from diffusers import I2VGenXLPipeline
from diffusers.utils import export_to_gif, load_image
import spaces
# Initialize the pipeline
pipeline = I2VGenXLPipeline.from_pretrained("ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16")
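# Offload submodules to the CPU when idle so the model fits in limited GPU memory;
# diffusers moves each component back to the GPU only when it is needed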
pipeline.enable_model_cpu_offload()
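# On ZeroGPU Spaces, @spaces.GPU attaches a GPU only while the decorated function runs;
# duration=240 raises the expected per-call runtime to 240 seconds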
@spaces.GPU(duration=240)
def generate_gif(image, prompt, negative_prompt, num_inference_steps, guidance_scale, seed):
    # Load the image
    image = load_image(image).convert("RGB")
    # Set the generator seed
    generator = torch.manual_seed(seed)
    # Generate the frames
    frames = pipeline(
        prompt=prompt,
        image=image,
        num_inference_steps=num_inference_steps,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        generator=generator
    ).frames[0]
    # Export to GIF
    gif_path = "i2v.gif"
    export_to_gif(frames, gif_path)
    return gif_path
# Create the Gradio interface
iface = gr.Interface(
    fn=generate_gif,
    inputs=[
        gr.Image(type="filepath", label="Input Image"),
        gr.Textbox(lines=2, placeholder="Enter your prompt here...", label="Prompt"),
        gr.Textbox(lines=2, placeholder="Enter your negative prompt here...", label="Negative Prompt"),
        gr.Slider(1, 100, step=1, value=50, label="Number of Inference Steps"),
        gr.Slider(1, 20, step=0.1, value=9.0, label="Guidance Scale"),
        gr.Number(label="Seed", value=8888)
    ],
    outputs=gr.File(label="Generated GIF"),
    title="I2VGen-XL GIF Generator",
    description="Generate a GIF from an image and a prompt using the I2VGen-XL model."
)
# Launch the interface
iface.launch()
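# A minimal client-side sketch (assumption: this app is deployed as a public Space;
# the Space id "your-username/i2vgen-xl-gif" below is hypothetical). It shows how the
# interface above could be called remotely with gradio_client; with gradio_client >= 1.0
# file inputs are wrapped with handle_file(), older versions accept a plain file path.
#
#   from gradio_client import Client, handle_file
#
#   client = Client("your-username/i2vgen-xl-gif")
#   gif_path = client.predict(
#       handle_file("input.jpg"),        # Input Image
#       "a red panda eating bamboo",     # Prompt
#       "low quality, blurry",           # Negative Prompt
#       50,                              # Number of Inference Steps
#       9.0,                             # Guidance Scale
#       8888,                            # Seed
#       api_name="/predict",
#   )
#   print(gif_path)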