import base64
from io import BytesIO
from typing import Any, Dict

import requests
import torch
from diffusers import StableDiffusionImg2ImgPipeline
from PIL import Image


def decode_base64_image(image_string):
    """Decode a base64-encoded image string into a PIL Image."""
    base64_image = base64.b64decode(image_string)
    buffer = BytesIO(base64_image)
    return Image.open(buffer)


class EndpointHandler:
    def __init__(self, path=""):
        # Load the img2img pipeline in half precision and move it to the GPU.
        self.pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            path, torch_dtype=torch.float16, revision="fp16"
        )
        self.pipe = self.pipe.to("cuda")

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Run img2img generation and return the result.

        :param data: A dictionary containing the `inputs` prompt, an `image` URL,
                     and optional `seed`, `width`, and `height` fields. Any remaining
                     keys are forwarded to the pipeline.
        :return: A dictionary with an `image` field (or `images` for multiple results)
                 containing the generated image(s) as base64-encoded PNG strings.
        """
        prompts = data.pop("inputs", None)
        url = data.pop("image", None)
        seed = data.pop("seed", 0)
        width = data.pop("width", 0)
        height = data.pop("height", 0)

        # Fetch the init image from the provided URL.
        response = requests.get(url)
        init_image = Image.open(BytesIO(response.content)).convert("RGB")
        # Alternatively, accept a base64-encoded image instead of a URL:
        # init_image = decode_base64_image(encoded_image)

        # Downscale the init image in place (preserving aspect ratio) only if a
        # target size was requested; thumbnail((0, 0)) would fail otherwise.
        if width and height:
            init_image.thumbnail((width, height))

        generator = torch.Generator(device="cuda").manual_seed(seed)
        images = self.pipe(prompts, image=init_image, generator=generator, **data).images

        # Encode each generated image as a base64 PNG string.
        img_strs = []
        for image in images:
            buffered = BytesIO()
            image.save(buffered, format="PNG")
            img_str = base64.b64encode(buffered.getvalue())
            img_strs.append(img_str)

        if len(img_strs) > 1:
            return {"images": [img_str.decode() for img_str in img_strs]}
        return {"image": img_strs[0].decode()}
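

# --- Usage sketch (assumption, not part of the deployed handler) ---
# A minimal local smoke test of the handler, assuming a CUDA GPU, a local model
# directory "./model", and a reachable example image URL; the path, URL, and
# payload values below are hypothetical placeholders. In production, Hugging Face
# Inference Endpoints instantiates EndpointHandler and calls it per request, so
# this block is only for manual verification.
if __name__ == "__main__":
    handler = EndpointHandler(path="./model")  # hypothetical local model path
    payload = {
        "inputs": "a fantasy landscape, trending on artstation",
        "image": "https://example.com/sketch.png",  # hypothetical init image URL
        "seed": 42,
        "width": 512,
        "height": 512,
        "strength": 0.75,        # forwarded to the pipeline via **data
        "guidance_scale": 7.5,   # forwarded to the pipeline via **data
    }
    result = handler(payload)
    # Decode the returned base64 string back into bytes and save the image.
    with open("output.png", "wb") as f:
        f.write(base64.b64decode(result["image"]))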