|
|
import gradio as gr |
|
|
from diffusers import StableDiffusionPipeline, DiffusionPipeline |
|
|
import torch |
|
|
from PIL import Image |
|
|
|
|
|
|
|
|
# Text-to-image pipeline (Stable Diffusion v1.5).
# Fall back to CPU when no GPU is present instead of crashing on .to("cuda");
# fp16 is poorly supported on CPU, so use float32 there.
_t2i_device = "cuda" if torch.cuda.is_available() else "cpu"
text2img = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16 if _t2i_device == "cuda" else torch.float32,
).to(_t2i_device)
|
|
|
|
|
|
|
|
# Image-to-video pipeline, with the same CPU fallback as the text2img load.
# NOTE(review): "damo-vilab/image-to-video" does not look like a published
# Hugging Face repo id (damo-vilab ships text-to-video models such as
# "damo-vilab/text-to-video-ms-1.7b") — confirm the model id before deploying.
_i2v_device = "cuda" if torch.cuda.is_available() else "cpu"
img2vid = DiffusionPipeline.from_pretrained(
    "damo-vilab/image-to-video",
    torch_dtype=torch.float16 if _i2v_device == "cuda" else torch.float32,
    variant="fp16",
).to(_i2v_device)
|
|
|
|
|
def generate_image(prompt):
    """Run the text-to-image pipeline on *prompt* and return the first image."""
    return text2img(prompt).images[0]
|
|
|
|
|
def generate_video(image):
    """Generate a short clip from *image* and return a playable video file path.

    Bug fix: the gr.Video output component expects a filesystem path (or URL),
    not a raw list of frames, so the frames are encoded to an .mp4 first.
    """
    # Local import: diffusers is already a file-level dependency; keeping the
    # helper import here keeps this function self-contained.
    from diffusers.utils import export_to_video

    frames = img2vid(image).frames
    # Newer diffusers versions wrap the frame list in a batch dimension
    # (frames[0] is the actual frame sequence) — unwrap if so.
    if frames and isinstance(frames[0], (list, tuple)):
        frames = frames[0]
    # export_to_video writes a temporary .mp4 and returns its path.
    return export_to_video(frames)
|
|
|
|
|
# UI layout. The original labels contained mojibake (emoji bytes decoded with
# the wrong codec, e.g. "π¨", "β€"); restored to readable emoji/text.
with gr.Blocks() as demo:
    gr.Markdown("## 🎨 Text → Image → Video Generator")

    with gr.Row():
        prompt = gr.Textbox(label="📝 Enter Prompt")
        img_output = gr.Image(label="🖼️ Generated Image")
        btn_img = gr.Button("Generate Image")

    with gr.Row():
        img_input = gr.Image(label="📥 Drop Image Here")
        vid_output = gr.Video(label="🎬 Generated Video")
        btn_vid = gr.Button("Generate Video")

    # Wire each button to its generation callback defined above.
    btn_img.click(fn=generate_image, inputs=prompt, outputs=img_output)
    btn_vid.click(fn=generate_video, inputs=img_input, outputs=vid_output)

demo.launch()