import imageio
import imageio_ffmpeg
import torch
from diffusers import MochiPipeline
from diffusers.utils import export_to_video

# Load the pre-trained video generation model
model = MochiPipeline.from_pretrained(
    "MISHANM/video_generation",
    # variant="bf16",
    torch_dtype=torch.bfloat16,
    device_map="balanced",
)

# Enable memory savings by decoding the VAE in tiles
model.enable_vae_tiling()

# Define the prompt and number of frames
prompt = "A cow drinking water on the surface of Mars."
num_frames = 20

# Generate the frames and export them as an MP4 video
frames = model(prompt, num_frames=num_frames).frames[0]
export_to_video(frames, "video.mp4", fps=30)

print("Video generation complete. Saved as 'video.mp4'.")
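If the pipeline does not fit comfortably in a single GPU's memory, diffusers also offers CPU offloading as an alternative to the balanced device map used above. The snippet below is only a sketch of that option, not the reference usage for this model: it assumes a single-GPU setup, drops the device_map argument (offloading and device maps are not meant to be combined), and relies on the standard enable_model_cpu_offload() and enable_vae_tiling() pipeline methods.

import torch
from diffusers import MochiPipeline

# Alternative low-VRAM setup (illustrative sketch)
model = MochiPipeline.from_pretrained(
    "MISHANM/video_generation",
    torch_dtype=torch.bfloat16,
)
model.enable_model_cpu_offload()  # move idle sub-models to CPU between steps
model.enable_vae_tiling()         # decode latents in tiles to cap peak memory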