from typing import Any, Dict

import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionInpaintPipeline
from PIL import Image
import base64
from io import BytesIO

# Select the compute device. This handler loads fp16 weights, so a CUDA GPU
# is required; fail fast at import time rather than at first request.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if device.type != 'cuda':
    raise ValueError("need to run on GPU")


class EndpointHandler():
    """Custom inference handler wrapping a Stable Diffusion inpainting pipeline.

    Loads the ``runwayml/stable-diffusion-inpainting`` fp16 checkpoint once at
    startup and serves inpainting requests via ``__call__``.
    """

    def __init__(self, path: str = ""):
        # Load the fp16 revision of the StableDiffusionInpaintPipeline.
        # NOTE(review): `revision="fp16"` is the legacy spelling; newer
        # diffusers versions prefer `variant="fp16"` — confirm against the
        # pinned diffusers version before changing.
        self.pipe = StableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="fp16",
            torch_dtype=torch.float16,
        )
        # Swap in the DPM-Solver multistep scheduler (fewer steps for
        # comparable quality).
        self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(
            self.pipe.scheduler.config
        )
        # Move all pipeline weights to the GPU.
        self.pipe = self.pipe.to(device)

    def __call__(self, data: Any) -> Image.Image:
        """Run one inpainting (or text-to-image) inference.

        :param data: dict with a ``prompt`` string and, optionally, base64-encoded
            ``image`` and ``mask_image`` fields. Both must be present for
            inpainting; otherwise the pipeline is invoked without them.
        :return: the first generated image as a ``PIL.Image.Image``.
            (Fixed: the previous annotation ``List[List[Dict[str, float]]]`` and
            the docstring's "base64 dict" claim did not match the actual return.)
        """
        encoded_image = data.pop("image", None)
        encoded_mask_image = data.pop("mask_image", None)
        prompt = data.pop("prompt", "")

        # Decode the inpainting inputs only when BOTH image and mask are
        # supplied; a lone image or lone mask is ignored.
        if encoded_image is not None and encoded_mask_image is not None:
            image = self.decode_base64_image(encoded_image)
            mask_image = self.decode_base64_image(encoded_mask_image)
        else:
            image = None
            mask_image = None

        # Run the diffusion pipeline and return the first generated image.
        out = self.pipe(prompt=prompt, image=image, mask_image=mask_image)
        return out.images[0]

    def decode_base64_image(self, image_string: str) -> Image.Image:
        """Decode a base64-encoded image string into an RGB PIL image.

        :param image_string: base64-encoded image bytes (any PIL-readable format).
        :return: the decoded image, converted to RGB — uploaded PNGs are often
            RGBA or palette-mode, and the pipeline expects 3-channel input.
        """
        raw_bytes = base64.b64decode(image_string)
        image = Image.open(BytesIO(raw_bytes))
        return image.convert("RGB")