File size: 1,520 Bytes
d0fb5a5
994f306
 
 
 
 
 
c8b9d47
 
 
994f306
5a33599
 
 
 
c8b9d47
994f306
 
5a33599
9273f9c
 
994f306
 
c8b9d47
d0fb5a5
c8b9d47
d0fb5a5
 
 
 
9273f9c
d0fb5a5
 
9273f9c
d0fb5a5
 
c8b9d47
994f306
d0fb5a5
 
994f306
d0fb5a5
6397763
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
import os, torch
import gradio as gr
from diffusers import StableDiffusionPipeline

# get hf user access token as an environment variable
# NOTE(review): if AUTH_TOKEN is unset this is None; from_pretrained will then
# attempt an unauthenticated download — confirm that is the intended fallback.
TOKEN_KEY = os.getenv('AUTH_TOKEN')

# choose GPU else fallback to CPU
device = "cuda" if torch.cuda.is_available() else "cpu"

# setup pipeline
# On GPU, load the half-precision (fp16) weights to cut memory use; on CPU,
# load the default full-precision weights (fp16 is poorly supported on CPU).
# NOTE(review): `revision="fp16"` is the legacy way to request fp16 weights;
# newer diffusers versions use `variant="fp16"` — confirm against the pinned
# diffusers version before changing.
if device == "cuda":
    pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16, use_auth_token=TOKEN_KEY)
else:
    pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=TOKEN_KEY)
# move model weights onto the selected device
pipe = pipe.to(device)

# define gradio function
def generate(prompt:str, seed:int, guidance:float, steps:int):
    """Generate one image from *prompt* using the global diffusion pipeline.

    A fresh, seeded torch.Generator makes the output reproducible for a
    given (prompt, seed, guidance, steps) combination.
    """
    rng = torch.Generator(device).manual_seed(seed)
    result = pipe(
        prompt=prompt,
        generator=rng,
        guidance_scale=guidance,
        num_inference_steps=steps,
    )
    # pipeline returns a batch; the UI wants a single PIL image
    return result.images[0]

# report which compute device the pipeline ended up on
if device != "cuda":
    print("Using CPU.")
else:
    print(torch.cuda.get_device_name(0) + " available.")

# create the gradio UI
# Inputs map positionally onto generate(prompt, seed, guidance, steps).
# set precision to 0 to round value to nearest int
demo = gr.Interface(
    fn=generate,
    inputs=[gr.Textbox(placeholder="castle on a mountain"), gr.Number(value=123456, precision=0), gr.Slider(0,10), gr.Number(value=50, precision=0)],
    outputs="image",
    allow_flagging="never",  # NOTE(review): renamed to `flagging_mode` in Gradio 4 — verify pinned version
    )

# allow queueing or incoming requests, max=3 
# NOTE(review): `concurrency_count` was removed in Gradio 4 (replaced by
# `default_concurrency_limit`) — verify against the deployed Gradio version.
demo.queue(concurrency_count=3)

# launch demo
# demo.launch(share=True) # uncomment if running outside of huggingface spaces
demo.launch()