from diffusers import StableDiffusionInpaintPipeline
import gradio as gr
import numpy as np
import imageio
from PIL import Image
import torch
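# Select the GPU when one is available; on a CPU-only Space everything below still works, just slowly.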
device = "cuda" if torch.cuda.is_available() else "cpu"
# Load the Stable Diffusion 2.0 inpainting pipeline (the built-in safety checker is disabled).
pipe = StableDiffusionInpaintPipeline.from_pretrained("stabilityai/stable-diffusion-2-inpainting", safety_checker=None)
pipe = pipe.to(device)
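# Optional memory saver (a sketch, not part of the original app): diffusers pipelines
# expose enable_attention_slicing(), which trades a little speed for lower peak memory.
# Uncomment if the Space runs out of memory on large inputs.
# pipe.enable_attention_slicing()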
def resize(value, img):
    # Load the image from disk and resize it to a (value, value) square.
    img = Image.open(img)
    img = img.resize((value, value))
    return img
def predict(source_img, prompt, negative_prompt):
    # The sketch tool returns a dict holding the uploaded image and the drawn mask as numpy arrays.
    imageio.imwrite("data.png", source_img["image"])
    imageio.imwrite("data_mask.png", source_img["mask"])
    # Resize both to 768x768, the native resolution of the Stable Diffusion 2.0 inpainting model.
    src = resize(768, "data.png")
    src.save("src.png")
    mask = resize(768, "data_mask.png")
    mask.save("mask.png")
    image = pipe(prompt=prompt, negative_prompt=negative_prompt, image=src, mask_image=mask, num_inference_steps=20).images[0]
    return image
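# Example of calling predict() directly, outside Gradio (hypothetical file names,
# shown only as a sketch of the dict layout the sketch tool produces):
# source = {"image": imageio.imread("photo.png"), "mask": imageio.imread("photo_mask.png")}
# result = predict(source, "a red brick fireplace", "blurry, low quality")
# result.save("inpainted.png")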
title="Stable Diffusion 2.0 Inpainting CPU"
description="Inpainting with Stable Diffusion 2.0
Warning: Slow process... ~10 min inference time.
Please use 512x512 or 768x768 square .png image as input to avoid memory error!!!"
gr.Interface(
    fn=predict,
    inputs=[
        gr.Image(source="upload", type="numpy", tool="sketch", elem_id="source_container"),
        gr.Textbox(label="What you want the AI to generate (77 token limit)"),
        gr.Textbox(label="What you do not want the AI to generate"),
    ],
    outputs="image",
    title=title,
    description=description,
    article="Code Monkey: Manjushri",
).launch(max_threads=True, debug=True)