from diffusers import StableDiffusionPipeline
import torch
import cv2
import os

# Load a text-to-image model. Note: transformers' pipeline() has no
# 'text-to-image' task, so the diffusers library is used here instead.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
generator = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4').to(device)

# One prompt per frame of the video
prompts = [
    "A spaceship in space, vibrant colors",
    "A spaceship flying past planets",
    "A spaceship approaching a distant galaxy",
    "A spaceship landing on an alien planet",
]

# Folder to save frames
os.makedirs('frames', exist_ok=True)

# Generate one PNG frame per prompt
for idx, prompt in enumerate(prompts):
    image = generator(prompt).images[0]
    image.save(f'frames/frame_{idx:03d}.png')

# Combine the frames into a 1 fps MP4 video
frame_files = [f'frames/frame_{i:03d}.png' for i in range(len(prompts))]
first_frame = cv2.imread(frame_files[0])
frame_height, frame_width = first_frame.shape[:2]
video_writer = cv2.VideoWriter(
    'space_video.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 1, (frame_width, frame_height)
)
for file in frame_files:
    video_writer.write(cv2.imread(file))
video_writer.release()

print("Video created: space_video.mp4")