import gradio as gr
import torch
from diffusers import AutoPipelineForInpainting
import diffusers
from PIL import Image
import os
from io import BytesIO
import base64
import re

SECRET_TOKEN = os.getenv('SECRET_TOKEN', 'default_secret')

# Regex pattern to match a data URI scheme prefix, e.g. "data:image/png;base64,"
data_uri_pattern = re.compile(r'data:image/(png|jpeg|jpg|webp);base64,')


# convert from base64 to PIL
def readb64(b64):
    # Remove any data URI scheme prefix with regex
    b64 = data_uri_pattern.sub("", b64)
    # Decode and open the image with PIL
    img = Image.open(BytesIO(base64.b64decode(b64)))
    return img


# convert from PIL to base64
def writeb64(image):
    buffered = BytesIO()
    image.save(buffered, format="PNG")
    b64image = base64.b64encode(buffered.getvalue())
    b64image_str = b64image.decode("utf-8")
    return b64image_str


device = "cuda" if torch.cuda.is_available() else "cpu"

pipe = AutoPipelineForInpainting.from_pretrained(
    "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
    torch_dtype=torch.float16,
    variant="fp16",
).to(device)


def read_content(file_path: str) -> str:
    """Read the content of the target file."""
    with open(file_path, 'r', encoding='utf-8') as f:
        content = f.read()
    return content


def predict(secret_token, input_image_b64, input_mask_b64, prompt="", negative_prompt="",
            guidance_scale=7.5, steps=20, strength=1.0, scheduler="EulerDiscreteScheduler"):
    if secret_token != SECRET_TOKEN:
        raise gr.Error(
            'Invalid secret token. Please fork the original space if you want to use it for yourself.')

    if negative_prompt == "":
        negative_prompt = None

    # Scheduler names may carry "-Karras" and "-SDE" suffixes
    # (e.g. "DPMSolverMultistepScheduler-Karras-SDE"): the first segment is the
    # diffusers class name, the suffixes enable Karras sigmas and the SDE solver.
    scheduler_class_name = scheduler.split("-")[0]

    add_kwargs = {}
    if len(scheduler.split("-")) > 1:
        add_kwargs["use_karras_sigmas"] = True
    if len(scheduler.split("-")) > 2:
        add_kwargs["algorithm_type"] = "sde-dpmsolver++"

    scheduler_cls = getattr(diffusers, scheduler_class_name)
    pipe.scheduler = scheduler_cls.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler", **add_kwargs)

    # SDXL inpainting expects 1024x1024 inputs; the mask is white where the
    # image should be repainted.
    init_image = readb64(input_image_b64).convert("RGB").resize((1024, 1024))
    mask = readb64(input_mask_b64).convert("RGB").resize((1024, 1024))

    output = pipe(prompt=prompt, negative_prompt=negative_prompt, image=init_image, mask_image=mask,
                  guidance_scale=guidance_scale, num_inference_steps=int(steps), strength=strength)

    return writeb64(output.images[0])


inpainter = gr.Blocks()

with inpainter as demo:
    gr.HTML("""

        <p>This space is a REST API to programmatically inpaint an image.</p>

        <p>Interested in using it? Please use the original space, thank you!</p>

""") secret_token = gr.Textbox() input_image_b64 = gr.Textbox() input_mask_b64 = gr.Textbox() prompt = gr.Textbox() guidance_scale = gr.Number(value=7.5, minimum=1.0, maximum=20.0, step=0.1, label="guidance_scale") steps = gr.Number(value=20, minimum=10, maximum=30, step=1, label="steps") strength = gr.Number(value=0.99, minimum=0.01, maximum=1.0, step=0.01, label="strength") negative_prompt = gr.Textbox(label="negative_prompt", placeholder="Your negative prompt", info="what you don't want to see in the image") schedulers = ["DEISMultistepScheduler", "HeunDiscreteScheduler", "EulerDiscreteScheduler", "DPMSolverMultistepScheduler", "DPMSolverMultistepScheduler-Karras", "DPMSolverMultistepScheduler-Karras-SDE"] scheduler = gr.Dropdown(label="Schedulers", choices=schedulers, value="EulerDiscreteScheduler") output_image_b64 = gr.Textbox() btn = gr.Button("Inpaint") btn.click(fn=predict, inputs=[secret_token, input_image_b64, input_mask_b64, prompt, negative_prompt, guidance_scale, steps, strength, scheduler], outputs=output_image_b64) inpainter.queue(max_size=25).launch()