from base64 import b64decode
from typing import Any, Dict

import torch
from diffusers import AutoencoderKLHunyuanVideo
from diffusers.utils import export_to_video
from diffusers.video_processor import VideoProcessor

class EndpointHandler:
    def __init__(self, path: str = ""):
        # Decode on CPU in float32; swap in "cuda" and a half-precision dtype
        # if the endpoint has a GPU.
        self.device = "cpu"
        self.dtype = torch.float32
        self.vae = (
            AutoencoderKLHunyuanVideo.from_pretrained(
                path, subfolder="vae", torch_dtype=self.dtype
            )
            .to(self.device, self.dtype)
            .eval()
        )
        self.vae_scale_factor_spatial = self.vae.spatial_compression_ratio
        self.video_processor = VideoProcessor(
            vae_scale_factor=self.vae_scale_factor_spatial
        )

    @torch.no_grad()
    def __call__(self, data: Dict[str, Any]) -> bytes:
        """
        Decode base64-encoded HunyuanVideo latents into an MP4 and return its bytes.

        Args:
            data (:obj:`dict`):
                includes the input data (`inputs`, a base64-encoded latent
                tensor) and the parameters for the inference (the `shape`
                and `dtype` of that tensor).
        """
        tensor = data["inputs"]
        tensor = b64decode(tensor.encode("utf-8"))

        parameters = data.get("parameters", {})
        if "shape" not in parameters:
            raise ValueError("Expected `shape` in parameters.")
        if "dtype" not in parameters:
            raise ValueError("Expected `dtype` in parameters.")

        DTYPE_MAP = {
            "float16": torch.float16,
            "float32": torch.float32,
            "bfloat16": torch.bfloat16,
        }
        shape = parameters["shape"]
        dtype = DTYPE_MAP.get(parameters["dtype"])
        if dtype is None:
            raise ValueError(
                f"Unsupported `dtype`: {parameters['dtype']!r}; "
                f"expected one of {sorted(DTYPE_MAP)}."
            )

        # Rebuild the latent tensor from the raw bytes, then undo the scaling
        # applied when the latents were encoded.
        tensor = torch.frombuffer(bytearray(tensor), dtype=dtype).reshape(shape)
        tensor = tensor.to(self.device, self.dtype)
        tensor = tensor / self.vae.config.scaling_factor

        # The `@torch.no_grad()` decorator already disables gradient tracking,
        # so no inner `with torch.no_grad():` block is needed.
        frames = self.vae.decode(tensor, return_dict=False)[0]
        frames = self.video_processor.postprocess_video(frames, output_type="pil")[0]

        # `export_to_video` writes the frames to a temporary .mp4 and returns
        # its path; read it back so the raw video bytes can be returned.
        path = export_to_video(frames, fps=15)
        with open(path, "rb") as f:
            video = f.read()
        return video
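
# ---------------------------------------------------------------------------
# Minimal client sketch (hypothetical): shows the payload shape this handler
# expects. The endpoint URL and token are placeholders, and the latent shape
# below is illustrative only, not dictated by the handler above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from base64 import b64encode

    import requests  # assumed available in the client environment

    # Example latents; in practice these come from a HunyuanVideo transformer.
    latents = torch.randn(1, 16, 9, 32, 32, dtype=torch.float32)
    payload = {
        "inputs": b64encode(latents.numpy().tobytes()).decode("utf-8"),
        "parameters": {"shape": list(latents.shape), "dtype": "float32"},
    }
    response = requests.post(
        "https://<your-endpoint>.endpoints.huggingface.cloud",  # placeholder
        headers={"Authorization": "Bearer <HF_TOKEN>"},  # placeholder
        json=payload,
    )
    # The handler returns the raw MP4 bytes in the response body.
    with open("decoded.mp4", "wb") as f:
        f.write(response.content)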