# NOTE: the lines below are residue scraped from the hosting page (Space status,
# file size, git revision hashes and a line-number gutter), preserved here as a
# comment because they are not part of the program source:
#   Spaces: Runtime error / Runtime error
#   File size: 5,019 Bytes
#   revisions: a3a6d83 efb9194 a3a6d83 d6e73ec a3a6d83 d6e73ec 3d7263a 77640c6 1d11373 d2013f8 d6e73ec a3a6d83 d6e73ec e5803df 6b9d118 a3a6d83 e5803df aaa46e0 a3a6d83 20df5d5 d6e73ec 77640c6 728ac5d d6e73ec 20df5d5 d6e73ec 77640c6 728ac5d a3a6d83 181d587 aaa46e0 a3a6d83
#!/usr/bin/env python
# coding: utf-8
# # Stable Diffusion with 🤗 Diffusers
#!pip install -Uq diffusers transformers fastcore
# ## Using Stable Diffusion
import logging
from pathlib import Path
import torch
from diffusers import StableDiffusionPipeline
from PIL import Image
import numpy as np
logging.disable(logging.WARNING)
from diffusers import StableDiffusionImg2ImgPipeline
from fastai.vision.augment import CropPad
import streamlit as st
imageLocation = st.empty()
with imageLocation.container():
st.header('Animate your dream')
st.write(f'Tap > to reveal sidebar. Select a style or artist. Enter text prompts and select the number of frames.\
Include an optional negative prompt. Press the button to generate the animation. \
Running on {"GPU takes 3-5 minutes." if torch.cuda.is_available() else "CPU does not work. You are advised to upgrade to (a paid) GPU after duplicating the space"}')
st.markdown('<a style="display:inline-block" href="https://huggingface.co/spaces/sci4/AnimateYourDream?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>',unsafe_allow_html=True)
st.image('DaliDream.gif')
with st.sidebar:
fn = 'Animation' #st.text_input('Name of animation','Dali')
style = st.text_input('Animation style (artist)','surreal Dali')
zoom = st.checkbox('Zoom in animation',False)
col1, col2 = st.columns(2)
with col1:
prompt1 = st.text_input('Prompt 1','a landscape')
prompt2 = st.text_input('Prompt 2','weird animals')
prompt3 = st.text_input('Prompt 3','a castle with weird animals')
with col2:
frames1 = st.number_input('Frames in 1', min_value=5, max_value=10, value=5, step=1)
frames2 = st.number_input('Frames in 2', min_value=5, max_value=10, value=5, step=1)
frames3 = st.number_input('Frames in 3', min_value=5, max_value=10, value=5, step=1)
negative_prompt = st.text_input('Negative prompt','text')
prompts = [[prompt1,frames1],
[prompt2,frames2],
[prompt3,frames3]]
def zoomIn(im,scale = 0.97):
size = im.size[0]
return im.crop_pad(int(size*scale)).resize((size,size))
def fade(im0,im1,steps=20):
"""Fade from one image to another"""
return [Image.fromarray(((1-i/steps)*np.array(im0)+i/steps*np.array(im1)).astype(np.uint8)) for i in range(steps)]
def makeMovie(prompts, style='', negative_prompt='', scale = (512-4)/512,mix_factor=0.01,strength=0.5,guidance_scale=7,num_inference_steps=50):
# Create an initial image then iterate
with st.spinner('Be patient, it takes about a minute to generate the initial image on a GPU, but is likely to time out on CPU.'):
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4",
revision="fp16", torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32)
if torch.cuda.is_available():
pipe = pipe.to("cuda")
prompt1 = f'{prompts[0][0]} in the style of {style}' if style!='' else prompts[0][0]
im1 = pipe(prompt1).images[0]
with st.spinner('Preparing animation pipeline takes another minute on a GPU'):
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4",revision="fp16",torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32)
if torch.cuda.is_available():
pipe = pipe.to("cuda")
im = im1
movie = [im]
for prompt,frames in prompts:
prompt = f'{prompt} in the style of {style}' if style!='' else prompt
for i in range(frames):
im = zoomIn(im,scale)
im2 = pipe(prompt, num_images_per_prompt=1, image=im, negative_prompt=negative_prompt,
strength=strength, guidance_scale=guidance_scale,
num_inference_steps=num_inference_steps).images[0]
if max(max(im2.getextrema()))>0:
#st.write(i,prompt)
im = Image.fromarray(((1-mix_factor*i)*np.array(im2)+mix_factor*i*np.array(im1)).astype(np.uint8))
movie += [im]
imageLocation.image(im,caption=f'{prompt} frame {i}')
n = len(movie)
extMovie = []
for i in range(n):
extMovie += fade(movie[i],movie[(i+1)%n],steps=20)
return extMovie
def create(fn,prompts,style='',negative_prompt='',zoom=True):
st.header('Generating initial image')
scale = (512-16)/512 if zoom else 1
movie = makeMovie(prompts, style, negative_prompt,scale,mix_factor=0.01,strength=0.5,guidance_scale=7,num_inference_steps=50)
with st.spinner('Final step: stitching frames together to make animation'):
movie[0].save(f'{fn}.gif', format='GIF', append_images=movie[1:], save_all=True, duration=50, loop=0)
imageLocation.image(open(f'{fn}.gif','rb').read(),caption=f'{fn} displays above, as soon as it has loaded')
st.sidebar.button('Generate animation',on_click=create, args=(fn,prompts,style,negative_prompt,zoom), type='primary')
# (end of file — stray gutter character from page extraction removed)