from typing import Dict, Any
from diffusers import AutoPipelineForInpainting
from PIL import Image
from io import BytesIO
import base64
import torch
class EndpointHandler():
    def __init__(self, path=""):
        # load the SDXL inpainting pipeline in fp16 and move it to the GPU,
        # since the generator below runs on CUDA
        self.pipeline = AutoPipelineForInpainting.from_pretrained(
            "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
            torch_dtype=torch.float16,
            variant="fp16",
        ).to("cuda")

    def __call__(self, data: Dict[str, Any]) -> Image.Image:
        """
        data args:
            image: b64 string
            mask: b64 string
            prompt: string
        returns:
            image
        """
        inputs = data.pop("inputs", data)

        # decode the base64-encoded image and mask to PIL
        image = Image.open(BytesIO(base64.b64decode(inputs['image'])))
        mask = Image.open(BytesIO(base64.b64decode(inputs['mask'])))
        prompt = inputs['prompt']

        # fix the seed for reproducible results
        generator = torch.Generator(device="cuda").manual_seed(0)

        # run the inpainting pipeline and take the first generated image
        image = self.pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask,
            guidance_scale=8.0,
            num_inference_steps=20,  # steps between 15 and 30 work well for us (from model card)
            strength=0.99,  # make sure to use `strength` below 1.0
            generator=generator,
        ).images[0]

        return image
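

# Minimal local smoke test (a sketch, not part of the deployed handler): it assumes a
# CUDA GPU is available and that hypothetical files "image.png" and "mask.png" exist
# next to this script. It shows how to build the base64 payload described in the
# __call__ docstring and invoke the handler directly.
if __name__ == "__main__":
    def _b64(path: str) -> str:
        # read a file and return its contents as a base64 string, matching the payload format above
        with open(path, "rb") as f:
            return base64.b64encode(f.read()).decode("utf-8")

    handler = EndpointHandler()
    payload = {
        "inputs": {
            "image": _b64("image.png"),  # hypothetical example input
            "mask": _b64("mask.png"),    # hypothetical example mask
            "prompt": "a tiger sitting on a park bench",
        }
    }
    result = handler(payload)
    result.save("result.png")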