#!/usr/bin/env python
# coding: utf-8

# # Stable Diffusion with 🤗 Diffusers

#!pip install -Uq diffusers transformers fastcore

# ## Using Stable Diffusion
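
# To run locally (assuming this script is saved as app.py, the usual entry point
# for a Streamlit app):
#   streamlit run app.py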

import logging

import numpy as np
import torch
from PIL import Image
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
# Imported for its side effect: fastai's augment module patches PIL.Image.Image
# with a crop_pad method, which zoomIn below relies on.
from fastai.vision.augment import CropPad
import streamlit as st

# Silence diffusers/transformers warnings in the Space logs.
logging.disable(logging.WARNING)

imageLocation = st.empty()
with imageLocation.container():
    st.header('Animate your dream')
    st.write('Tap > to reveal the sidebar. Select a style or artist, enter text prompts, '
             'and choose the number of frames for each. Optionally add a negative prompt, '
             'then press the button to generate the animation. '
             f'{"Running on a GPU takes 3-5 minutes." if torch.cuda.is_available() else "Running on a CPU does not work; duplicate the Space and upgrade to a (paid) GPU."}')
    st.markdown('<a style="display:inline-block" href="https://huggingface.co/spaces/sci4/AnimateYourDream?duplicate=true">'
                '<img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>',
                unsafe_allow_html=True)
    st.image('DaliDream.gif')
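# imageLocation is an st.empty() placeholder: everything rendered inside it (header,
# instructions, demo GIF) is replaced in place later, when makeMovie calls
# imageLocation.image() to preview each generated frame.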

with st.sidebar:
    fn = 'Animation'  # st.text_input('Name of animation', 'Dali')
    style = st.text_input('Animation style (artist)', 'surreal Dali')
    zoom = st.checkbox('Zoom in animation', False)
    col1, col2 = st.columns(2)
    with col1:
        prompt1 = st.text_input('Prompt 1', 'a landscape')
        prompt2 = st.text_input('Prompt 2', 'weird animals')
        prompt3 = st.text_input('Prompt 3', 'a castle with weird animals')
    with col2:
        frames1 = st.number_input('Frames in 1', min_value=5, max_value=10, value=5, step=1)
        frames2 = st.number_input('Frames in 2', min_value=5, max_value=10, value=5, step=1)
        frames3 = st.number_input('Frames in 3', min_value=5, max_value=10, value=5, step=1)
    negative_prompt = st.text_input('Negative prompt', 'text')

prompts = [[prompt1, frames1],
           [prompt2, frames2],
           [prompt3, frames3]]
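# Each entry pairs a prompt with the number of img2img frames to generate for it;
# makeMovie walks through these segments in order.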

def zoomIn(im, scale=0.97):
    """Crop `im` to `scale` times its size, then resize back to the original size."""
    size = im.size[0]
    return im.crop_pad(int(size * scale)).resize((size, size))
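# Cropping away a border and resizing back up zooms in slightly, so successive frames
# drift inward. With scale = (512 - 16) / 512, for example, an 8-pixel border is
# cropped from each side of a 512-pixel image before img2img repaints it.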

def fade(im0, im1, steps=20):
    """Fade from one image to another"""
    return [Image.fromarray(((1 - i / steps) * np.array(im0) + i / steps * np.array(im1)).astype(np.uint8))
            for i in range(steps)]
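# For example, fade(imA, imB, steps=20) returns 20 PIL images in which frame i is the
# blend (1 - i/20)*imA + (i/20)*imB: frame 0 is imA and the sequence stops just short
# of imB, so consecutive crossfades chain together without duplicated frames.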

def makeMovie(prompts, style='', negative_prompt='', scale=(512 - 4) / 512,
              mix_factor=0.01, strength=0.5, guidance_scale=7, num_inference_steps=50):
    # Create an initial image with text-to-image, then iterate on it with img2img
    with st.spinner('Be patient, it takes about a minute to generate the initial image on a GPU, '
                    'but is likely to time out on a CPU.'):
        pipe = StableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="fp16",
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32)
        if torch.cuda.is_available():
            pipe = pipe.to("cuda")
        prompt1 = f'{prompts[0][0]} in the style of {style}' if style != '' else prompts[0][0]
        im1 = pipe(prompt1).images[0]
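    # Stable Diffusion v1-4 produces 512x512 images by default, which is why the zoom
    # scales here and in create are expressed as fractions of 512.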
    with st.spinner('Preparing the animation pipeline takes another minute on a GPU'):
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="fp16",
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32)
        if torch.cuda.is_available():
            pipe = pipe.to("cuda")
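    # This second from_pretrained call should reuse the weights already in the local
    # Hugging Face cache from the text-to-image load above, so nothing is re-downloaded.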
    im = im1
    movie = [im]
    for prompt, frames in prompts:
        prompt = f'{prompt} in the style of {style}' if style != '' else prompt
        for i in range(frames):
            im = zoomIn(im, scale)
            im2 = pipe(prompt, num_images_per_prompt=1, image=im, negative_prompt=negative_prompt,
                       strength=strength, guidance_scale=guidance_scale,
                       num_inference_steps=num_inference_steps).images[0]
            # The safety checker replaces flagged outputs with an all-black image;
            # in that case keep the previous frame instead.
            if max(max(im2.getextrema())) > 0:
                # Blend a little of the initial image back in, to keep the
                # animation anchored to its starting point.
                im = Image.fromarray(((1 - mix_factor * i) * np.array(im2)
                                      + mix_factor * i * np.array(im1)).astype(np.uint8))
            movie += [im]
            imageLocation.image(im, caption=f'{prompt} frame {i}')
    # Crossfade between consecutive keyframes, wrapping back to the first frame
    # so the animation loops seamlessly.
    n = len(movie)
    extMovie = []
    for i in range(n):
        extMovie += fade(movie[i], movie[(i + 1) % n], steps=20)
    return extMovie
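
# With the defaults above (three prompts of 5 frames each), movie holds 1 + 15 = 16
# keyframes and extMovie holds 16 * 20 = 320 frames, i.e. 16 seconds of animation at
# the 50 ms per frame used when the GIF is saved below.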

def create(fn, prompts, style='', negative_prompt='', zoom=True):
    st.header('Generating initial image')
    scale = (512 - 16) / 512 if zoom else 1
    movie = makeMovie(prompts, style, negative_prompt, scale,
                      mix_factor=0.01, strength=0.5, guidance_scale=7, num_inference_steps=50)
    with st.spinner('Final step: stitching frames together to make the animation'):
        # duration is in milliseconds per frame, so 50 gives roughly 20 fps
        movie[0].save(f'{fn}.gif', format='GIF', append_images=movie[1:],
                      save_all=True, duration=50, loop=0)
    imageLocation.image(open(f'{fn}.gif', 'rb').read(),
                        caption=f'{fn} displays above, as soon as it has loaded')

st.sidebar.button('Generate animation', on_click=create,
                  args=(fn, prompts, style, negative_prompt, zoom), type='primary')
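# Note: Streamlit runs on_click callbacks at the start of the next rerun, so create
# executes with the widget values captured in args before the page is redrawn.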