from typing import Any, Dict

import torch
from diffusers import EulerAncestralDiscreteScheduler, StableDiffusionPipeline
from PIL import Image

# Select the device; this handler requires a CUDA-capable GPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if device.type != "cuda":
    raise ValueError("This handler must run on a GPU (no CUDA device found).")

model_id = "stabilityai/stable-diffusion-2-1-base"

class EndpointHandler:
    def __init__(self, path: str = ""):
        # Load the fp16 weights and swap in the Euler Ancestral scheduler.
        self.pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        self.pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(self.pipe.scheduler.config)
        self.pipe = self.pipe.to(device)

    def __call__(self, data: Dict[str, Any]) -> Image.Image:
        """
        Args:
            data (:obj:`dict`):
                Includes the input prompt and the parameters for the inference.
        Return:
            A :obj:`PIL.Image.Image`: the first generated image.
        """
        prompt = data.pop("inputs", data)
        # Fall back to the remaining top-level keys if no nested
        # "parameters" dict was sent.
        params = data.pop("parameters", data)

        # Hyperparameters, popped with defaults.
        num_inference_steps = params.pop("num_inference_steps", 20)
        guidance_scale = params.pop("guidance_scale", 7.5)
        negative_prompt = params.pop("negative_prompt", None)
        height = params.pop("height", None)
        width = params.pop("width", None)
        manual_seed = params.pop("manual_seed", -1)

        # Conditionally create a seeded generator so outputs are
        # reproducible; a seed of -1 (the default) leaves sampling
        # non-deterministic.
        generator = None
        if manual_seed != -1:
            generator = torch.Generator(device=device).manual_seed(manual_seed)

        # Run the inference pipeline; passing generator=None is equivalent
        # to omitting it, so a single call covers both branches.
        out = self.pipe(
            prompt,
            generator=generator,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            num_images_per_prompt=1,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
        )

        # Return the first generated PIL image.
        return out.images[0]
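

# --- Usage sketch (illustrative; not part of the original handler) ---
# A minimal local smoke test, assuming a CUDA GPU and network access to
# download the model weights. The serving toolkit normally instantiates
# EndpointHandler itself and serializes the returned PIL image (e.g. as
# base64); the encoding below mirrors that convention as an assumption.
if __name__ == "__main__":
    import base64
    from io import BytesIO

    handler = EndpointHandler()
    image = handler({
        "inputs": "an astronaut riding a horse on the moon",
        "parameters": {"manual_seed": 42, "num_inference_steps": 20},
    })

    # Encode the generated image as base64 PNG bytes, as a JSON API
    # response might, and print a short prefix as a sanity check.
    buffer = BytesIO()
    image.save(buffer, format="PNG")
    print(base64.b64encode(buffer.getvalue()).decode()[:64], "...")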