# Spaces:
# Runtime error
# Runtime error
# Standard library
import os

# Third-party
import torch
import gradio as gr
from diffusers import StableDiffusionPipeline
def is_google_colab():
    """Return True when running inside a Google Colab environment.

    Detection works by attempting to import the ``google.colab`` module,
    which only exists on Colab runtimes.
    """
    try:
        import google.colab  # noqa: F401 -- imported purely as an existence probe
        return True
    except ImportError:  # was a bare ``except:``; only a failed import is expected here
        return False


# Cached once at import time; used later for the demo's debug/share flags.
is_colab = is_google_colab()
# Hugging Face user access token, supplied via the AUTH_TOKEN environment
# variable (None when unset -- gated model downloads will then fail).
TOKEN_KEY = os.getenv('AUTH_TOKEN')

# Prefer the GPU when CUDA is available, otherwise fall back to the CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Build the Stable Diffusion pipeline. On GPU, load the half-precision
# weights ("fp16" revision, float16 dtype) to cut memory use; on CPU, use
# the default full-precision weights (fp16 is poorly supported on CPU).
if device == "cuda":
    pipe = StableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        revision="fp16",
        torch_dtype=torch.float16,
        use_auth_token=TOKEN_KEY,
    )
else:
    pipe = StableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        use_auth_token=TOKEN_KEY,
    )
pipe = pipe.to(device)
def generate(prompt: str, seed: int, guidance: float, steps: int):
    """Gradio callback: run one text-to-image generation.

    Args:
        prompt: text description of the desired image.
        seed: RNG seed; the same seed reproduces the same image.
        guidance: classifier-free guidance scale passed to the pipeline.
        steps: number of denoising inference steps.

    Returns:
        The first generated image (PIL image from the pipeline output).
    """
    # Seed a device-local generator so results are reproducible per seed.
    generator = torch.Generator(device).manual_seed(seed)
    result = pipe(
        prompt=prompt,
        generator=generator,
        guidance_scale=guidance,
        num_inference_steps=steps,
    )
    return result.images[0]
# Report which device the pipeline will run on.
# NOTE(review): the original emoji were mojibake ("π₯" / "π"); restored to
# the most likely intended characters -- confirm against the source repo.
if device == "cuda":
    device_name = torch.cuda.get_device_name(0)
    print(device_name + " available! Using GPU 🔥")
else:
    print("Using CPU🐢.")
# Create the Gradio UI.
# precision=0 on gr.Number rounds the submitted value to the nearest int,
# so the seed and step count reach generate() as integers.
demo = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(placeholder="castle on a mountain"),  # prompt
        gr.Number(value=123456, precision=0),            # seed
        gr.Slider(0, 10),                                # guidance scale
        gr.Number(value=50, precision=0),                # inference steps
    ],
    outputs="image",
    allow_flagging="never",
)
# Allow queueing of incoming requests; process at most 3 concurrently.
demo.queue(concurrency_count=3)
# On Colab, enable debug output and a public share link; otherwise run plain.
demo.launch(debug=is_colab, share=is_colab)