import gradio as gr
#import torch
#from torch import autocast  # only needed for GPU inference

from PIL import Image

import os
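# Hugging Face access token, read from the HF_TOKEN_SD environment variable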
MY_SECRET_TOKEN=os.environ.get('HF_TOKEN_SD')

from diffusers import StableDiffusionPipeline
#from diffusers import StableDiffusionImg2ImgPipeline

print("hello sylvain")

YOUR_TOKEN=MY_SECRET_TOKEN

device="cpu"

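# Load the Stable Diffusion v1.5 pipeline on CPU;
# the token authenticates the model download from the Hugging Face Hub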
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", use_auth_token=YOUR_TOKEN)
pipe.to(device)

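# Output component: an image gallery displayed as a two-column grid (Gradio 3.x .style() API)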
gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[2], height="auto")

def infer(prompt):
    #image = pipe(prompt, init_image=init_image)["sample"][0]
    # Generate four images for the same prompt in a single pipeline call
    images_list = pipe([prompt] * 4)
    images = []
    safe_image = Image.open(r"unsafe.png")
    # Replace any image flagged by the safety checker with the placeholder image
    for i, image in enumerate(images_list["images"]):
        if images_list["nsfw_content_detected"][i]:
            images.append(safe_image)
        else:
            images.append(image)

    return images

print("Great sylvain ! Everything is working fine !")

title="Stable Diffusion CPU"
description="Stable Diffusion example using CPU and HF token. <br />Warning: Slow process... ~5-10 min inference time. <b>NSFW filter enabled.</b>"

gr.Interface(fn=infer, inputs="text", outputs=gallery, title=title, description=description).queue(max_size=10).launch()