"""Gradio app: turn a text prompt into a short video of Stable Diffusion frames."""

import os
import tempfile

import gradio as gr
import moviepy.editor as mp
import torch
from diffusers import StableDiffusionPipeline

# Load the Stable Diffusion pipeline once at startup (expensive, downloads weights
# on first run).
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# Use the GPU when one is available; otherwise fall back to CPU (very slow —
# one full diffusion run per frame).
pipe.to("cuda" if torch.cuda.is_available() else "cpu")


def generate_video(prompt, duration, framerate):
    """Render ``duration * framerate`` independently sampled frames for *prompt*
    and assemble them into an H.264 MP4.

    Each frame is a fresh sample from the same prompt, which produces the
    flickering "dream" effect.

    Args:
        prompt: Text prompt passed to the diffusion pipeline for every frame.
        duration: Clip length in seconds.
        framerate: Frames per second of the output clip.

    Returns:
        Filesystem path of the written MP4 file.
    """
    temp_dir = os.path.join(tempfile.gettempdir(), "sd_frames")
    os.makedirs(temp_dir, exist_ok=True)

    # Purge frames left over from a previous run: ImageSequenceClip reads every
    # image in the folder, so stale files would leak into the new video.
    for name in os.listdir(temp_dir):
        if name.endswith(".png"):
            os.remove(os.path.join(temp_dir, name))

    total_frames = int(duration * framerate)
    for i in range(1, total_frames + 1):
        frame = pipe(prompt).images[0]
        # Zero-padded names keep the frames in order when the folder is read back.
        frame.save(os.path.join(temp_dir, f"frame_{i:04d}.png"))

    # Assemble the numbered frames into an MP4.
    clip = mp.ImageSequenceClip(temp_dir, fps=framerate)
    output_path = os.path.join(tempfile.gettempdir(), "sd_video.mp4")
    clip.write_videofile(output_path, codec="libx264")
    return output_path


iface = gr.Interface(
    fn=generate_video,
    inputs=[
        gr.Textbox(label="Prompt"),
        # Gradio 3+ takes `value=` for the initial slider position;
        # the legacy `default=` keyword raises TypeError on modern Gradio.
        gr.Slider(label="Duration (seconds)", minimum=1, maximum=30, step=1, value=5),
        gr.Slider(label="Framerate (fps)", minimum=1, maximum=60, step=1, value=30),
    ],
    outputs=gr.Video(label="Generated Video"),
    title="AI Dreams & Visions Video Generator",
    description="Generate a video based on a prompt. Enter the prompt, set the duration and framerate, and click 'Generate Video'.",
    theme="dark",
    css="footer {visibility: hidden}",
)

if __name__ == "__main__":
    iface.launch(share=True)