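# SDXL 1.0 inpainting demo (CPU): upload a square image, sketch a mask over the
# region to replace, and describe the desired result with a prompt. Inference on
# the CPU is slow (roughly 10 minutes per image).
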
from diffusers import StableDiffusionXLInpaintPipeline
import gradio as gr
import imageio
from PIL import Image
import torch
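
# Run on the GPU when available; otherwise fall back to the CPU.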
device = "cuda" if torch.cuda.is_available() else "cpu"
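
# Load the SDXL refiner checkpoint as an image-to-image pipeline designed for inpainting.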
pipe = StableDiffusionXLInpaintPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", safety_checker=None)
pipe = pipe.to(device)
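

# Load an image from disk and resize it to a value x value square.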
def resize(value, img):
    img = Image.open(img)
    img = img.resize((value, value))
    return img
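

# Gradio handler: save the uploaded image and the sketched mask to disk, resize
# both, and run the inpainting pipeline on the masked region.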
def predict(source_img, prompt, negative_prompt):
    # The sketch tool returns a dict holding the uploaded image and the drawn mask.
    imageio.imwrite("data.png", source_img["image"])
    imageio.imwrite("data_mask.png", source_img["mask"])
    # Resize both to 768x768 before passing them to the pipeline.
    src = resize(768, "data.png")
    src.save("src.png")
    mask = resize(768, "data_mask.png")
    mask.save("mask.png")
    # Regenerate the masked region from the prompt (20 denoising steps).
    image = pipe(prompt=prompt, negative_prompt=negative_prompt, image=src, mask_image=mask, num_inference_steps=20).images[0]
    return image
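

# Gradio UI: an image upload with a sketch tool for the mask, plus prompt and
# negative-prompt text boxes.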
title="SDXL 1.0 Inpainting CPU"
description="Inpainting with SDXL 1.0 <br />Warning: Slow process... ~10 min inference time.<br> <b>Please use square .png image as input, 512x512, 768x768, or 1024x1024</b>"

gr.Interface(
    fn=predict,
    inputs=[
        gr.Image(source="upload", type="numpy", tool="sketch", elem_id="source_container"),
        gr.Textbox(label='What you want the AI to generate (77-token limit)'),
        gr.Textbox(label='What you do not want the AI to generate'),
    ],
    outputs='image',
    title=title,
    description=description,
    article="Code Monkey: <a href=\"https://huggingface.co/Manjushri\">Manjushri</a>",
).launch(max_threads=True, debug=True)