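# Gradio demo: Stable Diffusion img2img running on CPU, meant to run as a Hugging Face Space.
# The Hub access token is expected in the HF_TOKEN_SD secret, and the code targets the
# diffusers / gradio APIs it was written against (init_image, .style(grid=...), enable_queue);
# newer library releases may have renamed or dropped these arguments, so pin versions
# accordingly if reusing this script.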
import gradio as gr
# import torch
# from torch import autocast  # only needed for GPU inference

from PIL import Image
import os

from diffusers import StableDiffusionImg2ImgPipeline

# Hugging Face access token, provided as the Space secret HF_TOKEN_SD.
MY_SECRET_TOKEN = os.environ.get('HF_TOKEN_SD')

print("hello sylvain")

YOUR_TOKEN = MY_SECRET_TOKEN

# Inference runs on CPU only.
device = "cpu"

#prompt_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=YOUR_TOKEN)
#prompt_pipe.to(device)

# Load the Stable Diffusion img2img pipeline and move it to the target device.
img_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=YOUR_TOKEN)
img_pipe.to(device)

# Gradio components: the uploaded init image and the output image gallery.
source_img = gr.Image(source="upload", type="filepath", label="init_img | 512*512 px")
gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[2], height="auto")

def resize(value, img):
    """Open the uploaded image and resize it to a value x value square (LANCZOS)."""
    img = Image.open(img)
    img = img.resize((value, value), Image.Resampling.LANCZOS)
    return img


def infer(prompt, source_img):
    # Resize the uploaded image to 512x512, the resolution SD v1.4 expects.
    source_image = resize(512, source_img)
    source_image.save('source.png')
    # Generate two samples per prompt from the init image.
    images_list = img_pipe([prompt] * 2, init_image=source_image, strength=0.75)
    images = []
    safe_image = Image.open(r"unsafe.png")
    # Replace any output flagged by the safety checker with the placeholder image.
    for i, image in enumerate(images_list["sample"]):
        if images_list["nsfw_content_detected"][i]:
            images.append(safe_image)
        else:
            images.append(image)
    return images

print("Great sylvain ! Everything is working fine !")

title = "Img2Img Stable Diffusion CPU"
description = "Img2Img Stable Diffusion example using CPU and an HF token. <br />Warning: slow process... ~5-10 min inference time. <b>NSFW filter enabled.</b>"

gr.Interface(fn=infer, inputs=["text", source_img], outputs=gallery, title=title, description=description).queue(max_size=100).launch(enable_queue=True)
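
# Usage note (an assumption, not part of the original Space setup): to run this script
# outside a Hugging Face Space, export a valid access token first, e.g.
#   export HF_TOKEN_SD=<your token>
# and make sure an "unsafe.png" placeholder image exists next to the script, since
# infer() falls back to it whenever the NSFW filter triggers.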