import gradio as gr
import torch
import streamlit as st  # used only to read the Hugging Face access token from st.secrets
from PIL import Image
from diffusers import StableDiffusionImg2ImgPipeline
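
# Note: the diffusers calls below follow the older API this Space appears to target
# (the pipeline is called with `init_image` and its output indexed via the
# "sample" / "nsfw_content_detected" keys). Recent diffusers releases renamed the
# argument to `image` and expose results as `output.images` /
# `output.nsfw_content_detected`; adjust accordingly if the dependency is upgraded.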

# Run everything on CPU; Stable Diffusion v1.4 is gated, so the access token is
# read from the Space's secrets.
device = "cpu"

pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", use_auth_token=st.secrets['USER_TOKEN'])
pipe = pipe.to(device)
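# If this ever runs on GPU hardware, the pipeline could be moved there instead
# (sketch, assuming CUDA is available):
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   pipe = pipe.to(device)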

def resize(w_val, l_val, img):
    # Load the uploaded file from disk and resize it to the requested
    # width x height using high-quality Lanczos resampling.
    img = Image.open(img)
    img = img.resize((w_val, l_val), Image.Resampling.LANCZOS)
    return img


def infer(source_img, prompt, guide, steps, seed, strength):
    # Seed a CPU generator so a given seed reproduces the same output.
    generator = torch.Generator('cpu').manual_seed(seed)
    source_image = resize(768, 512, source_img)
    source_image.save('source.png')
    image_list = pipe([prompt], init_image=source_image, strength=strength,
                      guidance_scale=guide, num_inference_steps=steps,
                      generator=generator)
    images = []
    safe_image = Image.open(r"unsafe.png")
    # Replace any output flagged by the safety checker with the placeholder image.
    for i, image in enumerate(image_list["sample"]):
        if image_list["nsfw_content_detected"][i]:
            images.append(safe_image)
        else:
            images.append(image)
    return images[0]

gr.Interface(
    fn=infer,
    inputs=[
        gr.Image(source="upload", type="filepath", label="Raw Image"),
        gr.Textbox(label='Prompt Input Text'),
        gr.Slider(2, 15, value=7, label='Guidance Scale'),
        gr.Slider(10, 50, value=25, step=1, label='Number of Iterations'),
        gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, randomize=True),
        gr.Slider(label='Strength', minimum=0, maximum=1, step=.05, value=.5),
    ],
    outputs='image').queue(max_size=10).launch()