from typing import Any
import torch
from diffusers import StableDiffusionXLImg2ImgPipeline, DiffusionPipeline, AutoencoderKL
from PIL import Image
import base64
from io import BytesIO
from diffusers.image_processor import VaeImageProcessor
# set device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if device.type != 'cuda':
    raise ValueError("need to run on GPU")
class EndpointHandler():
    def __init__(self, path=""):
        # load the SDXL refiner img2img pipeline in half precision and move it to the GPU
        self.smooth_pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
            "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
        )
        self.smooth_pipe.to("cuda")
    def __call__(self, data: Any) -> Image.Image:
        """
        :param data: A dictionary containing a `prompt` and a base64-encoded `image` field.
        :return: The refined image as a PIL `Image`.
        """
        encoded_image = data.pop("image", None)
        prompt = data.pop("prompt", "")

        if encoded_image is None:
            raise ValueError("an `image` field with a base64-encoded image is required")

        # decode the input image and preprocess it into a normalized pixel tensor
        image = self.decode_base64_image(encoded_image)
        image_processor = VaeImageProcessor()
        pixel_values = image_processor.preprocess(image)
        pixel_values = pixel_values.to(device="cuda")

        # encode the pixels into latents with the SDXL base VAE
        vae = AutoencoderKL.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0",
            subfolder="vae", use_safetensors=True,
        ).to("cuda")
        with torch.no_grad():
            latents = vae.encode(pixel_values).latent_dist.sample() * vae.config.scaling_factor

        # run the refiner on the latents and return the first generated image
        self.smooth_pipe.enable_xformers_memory_efficient_attention()
        out = self.smooth_pipe(prompt, image=latents).images[0]
        return out
    # helper to decode input image
    def decode_base64_image(self, image_string):
        base64_image = base64.b64decode(image_string)
        buffer = BytesIO(base64_image)
        # convert to RGB so RGBA or grayscale inputs also work with the 3-channel VAE
        image = Image.open(buffer).convert("RGB")
        return image
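
# A minimal local usage sketch, not part of the handler itself: it assumes a CUDA GPU,
# a hypothetical local file "input.png", and that the handler is called with the same
# payload shape an Inference Endpoints client would send (a `prompt` plus a base64 `image`).
if __name__ == "__main__":
    # encode a local image as base64, mirroring what a client request would contain
    with open("input.png", "rb") as f:
        encoded = base64.b64encode(f.read()).decode("utf-8")

    handler = EndpointHandler()
    refined = handler({"prompt": "a high-quality, detailed photo", "image": encoded})
    refined.save("refined.png")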