from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from utils import write_video, dummy
from PIL import Image
import numpy as np
import os

# Pin the visible GPU before torch initializes CUDA.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

import torch
import gradio as gr

orig_prompt = "Create a relaxing atmosphere with the use of plants and other natural elements. Such as a hanging terrarium or a wall-mounted planter. Include plenty of storage options to keep the space organized and clutter-free. Consider adding a vanity with double sinks and plenty of drawers and cabinets. As well as a wall mounted medicine and towel storage."
orig_negative_prompt = "blurry, bad art, blurred, text, watermark"


def stable_diffusion_zoom_out(repo_id, original_prompt, negative_prompt, steps, num_frames, fps):
    """Generate an infinite zoom-out video by repeatedly shrinking the current
    frame and outpainting its border with a Stable Diffusion inpainting model."""
    pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.float16, revision="fp16")
    pipe.enable_xformers_memory_efficient_attention()
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    pipe = pipe.to("cuda")
    pipe.safety_checker = dummy  # disable the safety checker (see the utils sketch at the end of this file)

    # Start from a fully transparent 512x512 RGBA canvas: its alpha channel is
    # all zeros, so the inverted mask (255 - alpha) is all white and the first
    # pass inpaints the entire frame from the prompt.
    current_image = Image.new(mode="RGBA", size=(512, 512))
    mask_image = np.array(current_image)[:, :, 3]  # alpha channel (image is RGBA)
    mask_image = Image.fromarray(255 - mask_image).convert("RGB")
    current_image = current_image.convert("RGB")

    num_images = 1
    prompt = [original_prompt] * num_images
    negative_prompt = [negative_prompt] * num_images
    images = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        image=current_image,
        mask_image=mask_image,
        num_inference_steps=25,
    )[0]
    current_image = images[0]
    all_frames = [current_image]

    for i in range(num_frames):
        # Shrink the previous frame by `steps` pixels on each side and paste it
        # onto a blank canvas. The canvas alpha is set to 1, so the inverted
        # mask (255 - 1 = 254) is effectively white on the border; the pasted
        # center has alpha 255 and stays black in the mask.
        next_image = np.array(current_image.convert("RGBA")) * 0
        prev_image = current_image.resize((512 - 2 * steps, 512 - 2 * steps))
        prev_image = prev_image.convert("RGBA")
        prev_image = np.array(prev_image)
        next_image[:, :, 3] = 1
        next_image[steps:512 - steps, steps:512 - steps, :] = prev_image
        prev_image = Image.fromarray(next_image)
        current_image = prev_image

        # White where alpha is (near) zero -> outpaint the border, keep the center.
        mask_image = np.array(current_image)[:, :, 3]
        mask_image = Image.fromarray(255 - mask_image).convert("RGB")
        current_image = current_image.convert("RGB")
        images = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            image=current_image,
            mask_image=mask_image,
            num_inference_steps=25,
        )[0]
        current_image = images[0]
        # Paste the shrunken previous frame back over the center so only the
        # border is newly generated content.
        current_image.paste(prev_image, mask=prev_image)
        all_frames.append(current_image)

    save_path = "infinite_zoom_out.mp4"
    write_video(save_path, all_frames, fps=fps)
    return save_path


inputs = [
    gr.inputs.Textbox(lines=1, default="stabilityai/stable-diffusion-2-inpainting", label="Model ID"),
    gr.inputs.Textbox(lines=5, default=orig_prompt, label="Prompt"),
    gr.inputs.Textbox(lines=1, default=orig_negative_prompt, label="Negative Prompt"),
    gr.inputs.Slider(minimum=1, maximum=64, default=32, label="Steps"),  # outpainted border width in pixels per frame
    gr.inputs.Slider(minimum=1, maximum=500, default=10, step=10, label="Frames"),
    gr.inputs.Slider(minimum=1, maximum=100, default=16, step=1, label="FPS"),
]
output = gr.outputs.Video()
examples = [
    ["stabilityai/stable-diffusion-2-inpainting", orig_prompt, orig_negative_prompt, 32, 50, 16],
]
title = "Stable Diffusion Infinite Zoom Out"
description = """

For faster inference without waiting in the queue, you may duplicate this Space and upgrade to a GPU in the settings.
Duplicate Space

""" demo_app = gr.Interface( fn=stable_diffusion_zoom_out, inputs=inputs, outputs=output, title=title, description=description, theme='huggingface', examples=examples, cache_examples=True ) demo_app.launch(debug=True, enable_queue=True)