from typing import Any

import base64
from io import BytesIO

import torch
from PIL import Image
from diffusers import (
    AutoPipelineForInpainting,
    # Only needed by the disabled alternative pipelines below:
    # AutoPipelineForImage2Image,
    # DPMSolverMultistepScheduler,
    # StableDiffusionInpaintPipeline,
    # StableDiffusionXLImg2ImgPipeline,
)

# Select the execution device; this handler requires a CUDA-capable GPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if device.type != "cuda":
    raise ValueError("This handler needs to run on a GPU")


class EndpointHandler:
    def __init__(self, path: str = ""):
        # Alternative pipelines kept for reference (disabled):
        # self.fast_pipe = AutoPipelineForInpainting.from_pretrained(
        #     "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
        #     torch_dtype=torch.float16,
        #     variant="fp16",
        # ).to("cuda")
        # self.generator = torch.Generator(device="cuda").manual_seed(0)
        # self.smooth_pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
        #     "stabilityai/stable-diffusion-xl-refiner-1.0",
        #     torch_dtype=torch.float16,
        #     variant="fp16",
        #     use_safetensors=True,
        # ).to("cuda")

        # Load the Kandinsky 2.2 decoder inpainting pipeline.
        self.pipe = AutoPipelineForInpainting.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint",
            torch_dtype=torch.float16,
        )

        # Optional scheduler swap (disabled):
        # self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(self.pipe.scheduler.config)

        # Model CPU offload manages device placement itself, so the pipeline is
        # not moved to the GPU explicitly afterwards.
        self.pipe.enable_model_cpu_offload()
        self.pipe.enable_xformers_memory_efficient_attention()

        # SDXL refiner / image-to-image pipelines kept for reference (disabled):
        # self.pipe2 = AutoPipelineForInpainting.from_pretrained(
        #     "stabilityai/stable-diffusion-xl-refiner-1.0",
        #     torch_dtype=torch.float16,
        #     variant="fp16",
        #     use_safetensors=True,
        # ).to("cuda")
        # self.pipe3 = AutoPipelineForImage2Image.from_pipe(self.pipe2)

    def __call__(self, data: Any) -> Image.Image:
        """
        :param data: A dictionary with a `prompt` field plus optional `negative_prompt`,
                     base64-encoded `image` and `mask_image`, `method`, `strength`,
                     `guidance_scale` and `num_inference_steps` fields.
        :return: The generated image as a PIL `Image`.
""" encoded_image = data.pop("image", None) encoded_mask_image = data.pop("mask_image", None) prompt = data.pop("prompt", "") negative_prompt = data.pop("negative_prompt", "") method = data.pop("method", "slow") strength = data.pop("strength", 0.2) guidance_scale = data.pop("guidance_scale", 8.0) num_inference_steps = data.pop("num_inference_steps", 20) """ if(method == "smooth"): if encoded_image is not None: image = self.decode_base64_image(encoded_image) out = self.smooth_pipe(prompt, image=image).images[0] return out """ # process image if encoded_image is not None and encoded_mask_image is not None: image = self.decode_base64_image(encoded_image).convert("RGB") mask_image = self.decode_base64_image(encoded_mask_image).convert("RGB") else: image = None mask_image = None """ if(method == "fast"): image = self.fast_pipe( prompt=prompt, negative_prompt=negative_prompt, image=image, mask_image=mask_image, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, # steps between 15 and 30 work well for us strength=strength, # make sure to use `strength` below 1.0 generator=self.generator, ).images[0] return image """ #pipe = AutoPipelineForInpainting.from_pretrained("diffusers/stable-diffusion-xl-1.0-inpainting-0.1", torch_dtype=torch.float16, variant="fp16").to("cuda") # run inference pipeline out = self.pipe(prompt=prompt, negative_prompt=negative_prompt, image=image, mask_image=mask_image) print("1st pipeline part successful!") image = out.images[0].resize((1024, 1024)) print("image resizing successful!") """ self.pipe2.enable_xformers_memory_efficient_attention() image = self.pipe2( prompt=prompt, negative_prompt=negative_prompt, image=image, mask_image=mask_image, guidance_scale=guidance_scale, #8.0 num_inference_steps=num_inference_steps, #100 strength=strength, #0.2 output_type="latent", # let's keep in latent to save some VRAM ).images[0] print("2nd pipeline part successful!") self.pipe3.enable_xformers_memory_efficient_attention() image2 = self.pipe3( prompt=prompt, image=image, guidance_scale=guidance_scale, #8.0 num_inference_steps=num_inference_steps, #100 strength=strength, #0.2 ).images[0] print("3rd pipeline part successful!") """ # return first generate PIL image return image # helper to decode input image def decode_base64_image(self, image_string): base64_image = base64.b64decode(image_string) buffer = BytesIO(base64_image) image = Image.open(buffer) return image