# LTX-Video-0.9.1-HFIE / handler.py
from dataclasses import dataclass
from typing import Any, Dict
import asyncio
import base64
import io
import pprint
import logging
import random
import traceback
import os
import numpy as np
import torch
from diffusers import LTXPipeline, LTXImageToVideoPipeline
from PIL import Image
from varnish import Varnish
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Constraints
MAX_LARGE_SIDE = 1280
MAX_SMALL_SIDE = 768 # should be 720 but it must be divisible by 32
MAX_FRAMES = (8 * 21) + 1 # visual glitches appear after about 169 frames, so we cap it
# this is only a temporary solution (famous last words)
def apply_dirty_hack_to_patch_file_extensions_and_bypass_filter(directory):
"""
Recursively rename all '.wut' files to '.pth' in the given directory
Args:
directory (str): Path to the directory to process
"""
# Convert the directory path to absolute path
directory = os.path.abspath(directory)
# Walk through directory and its subdirectories
for root, _, files in os.walk(directory):
for filename in files:
if filename.endswith('.wut'):
# Get full path of the file
old_path = os.path.join(root, filename)
# Create new filename by replacing the extension
new_filename = filename.replace('.wut', '.pth')
new_path = os.path.join(root, new_filename)
                try:
                    os.rename(old_path, new_path)
                    logger.info(f"Renamed: {old_path} -> {new_path}")
                except OSError as e:
                    logger.error(f"Error renaming {old_path}: {e}")
def print_directory_structure(startpath):
"""Print the directory structure starting from the given path."""
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * level
logger.info(f"{indent}{os.path.basename(root)}/")
subindent = ' ' * 4 * (level + 1)
for f in files:
logger.info(f"{subindent}{f}")
logger.info("💡 Applying a dirty hack (patch ""/repository"" to fix file extensions):")
apply_dirty_hack_to_patch_file_extensions_and_bypass_filter("/repository")
#logger.info("💡 Printing directory structure of ""/repository"":")
#print_directory_structure("/repository")
@dataclass
class GenerationConfig:
"""Configuration for video generation"""
# general content settings
prompt: str = ""
negative_prompt: str = "saturated, highlight, overexposed, highlighted, overlit, shaking, too bright, worst quality, inconsistent motion, blurry, jittery, distorted, cropped, watermarked, watermark, logo, subtitle, subtitles, lowres"
# video model settings (will be used during generation of the initial raw video clip)
# we use small values to make things a bit faster
width: int = 768
height: int = 416
    # users may tend to always set this to the max, to get as much usable content as possible (which is MAX_FRAMES, i.e. 169).
    # The value must be a multiple of 8, plus 1 frame.
    # visual glitches appear after about 169 frames, so we don't need more anyway
num_frames: int = (8 * 14) + 1
guidance_scale: float = 5.0
num_inference_steps: int = 30
# reproducible generation settings
seed: int = -1 # -1 means random seed
    # varnish settings (will be used for post-processing after the raw video clip has been generated)
    fps: int = 30 # FPS of the final video (only applied at the very end, when converting to mp4)
double_num_frames: bool = False # if True, the number of frames will be multiplied by 2 using RIFE
super_resolution: bool = False # if True, the resolution will be multiplied by 2 using Real_ESRGAN
    grain_amount: float = 0.0 # be careful, adding film grain can negatively impact video compression
# audio settings
enable_audio: bool = False # Whether to generate audio
audio_prompt: str = "" # Text prompt for audio generation
audio_negative_prompt: str = "voices, voice, talking, speaking, speech" # Negative prompt for audio generation
def validate_and_adjust(self) -> 'GenerationConfig':
"""Validate and adjust parameters to meet constraints"""
# First check if it's one of our explicitly allowed resolutions
if not ((self.width == MAX_LARGE_SIDE and self.height == MAX_SMALL_SIDE) or
(self.width == MAX_SMALL_SIDE and self.height == MAX_LARGE_SIDE)):
# For other resolutions, ensure total pixels don't exceed max
            MAX_TOTAL_PIXELS = MAX_SMALL_SIDE * MAX_LARGE_SIDE # 983040, slightly above the 921600 = 1280 * 720 target
# If total pixels exceed maximum, scale down proportionally
total_pixels = self.width * self.height
if total_pixels > MAX_TOTAL_PIXELS:
scale = (MAX_TOTAL_PIXELS / total_pixels) ** 0.5
self.width = max(128, min(MAX_LARGE_SIDE, round(self.width * scale / 32) * 32))
self.height = max(128, min(MAX_LARGE_SIDE, round(self.height * scale / 32) * 32))
else:
# Round dimensions to nearest multiple of 32
self.width = max(128, min(MAX_LARGE_SIDE, round(self.width / 32) * 32))
self.height = max(128, min(MAX_LARGE_SIDE, round(self.height / 32) * 32))
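            # Worked example (hypothetical request): 1920x1080 exceeds the pixel cap,
            # so it is scaled by sqrt(983040 / 2073600) ≈ 0.689 and snapped to
            # multiples of 32 (clamped to 1280), yielding 1280x736.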
# Adjust number of frames to be in format 8k + 1
k = (self.num_frames - 1) // 8
self.num_frames = min((k * 8) + 1, MAX_FRAMES)
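        # Worked example: num_frames=150 gives k=18, hence (18 * 8) + 1 = 145 frames;
        # anything larger is capped at MAX_FRAMES (169).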
# Set random seed if not specified
if self.seed == -1:
self.seed = random.randint(0, 2**32 - 1)
return self
class EndpointHandler:
"""Handles video generation requests using LTX models and Varnish post-processing"""
def __init__(self, model_path: str = ""):
"""Initialize the handler with LTX models and Varnish
Args:
model_path: Path to LTX model weights
"""
        # TF32 could be enabled here for a potential speedup on Ampere GPUs:
        #torch.backends.cuda.matmul.allow_tf32 = True
# Initialize models with bfloat16 precision
self.text_to_video = LTXPipeline.from_pretrained(
model_path,
torch_dtype=torch.bfloat16
).to("cuda")
self.image_to_video = LTXImageToVideoPipeline.from_pretrained(
model_path,
torch_dtype=torch.bfloat16
).to("cuda")
        # CPU offload could be enabled here to trade speed for memory:
#self.text_to_video.enable_model_cpu_offload()
#self.image_to_video.enable_model_cpu_offload()
# Initialize Varnish for post-processing
self.varnish = Varnish(
device="cuda" if torch.cuda.is_available() else "cpu",
model_base_dir="/repository/varnish",
# there is currently a bug with MMAudio and/or torch and/or the weight format and/or version..
# not sure how to fix that.. :/
#
# it says:
# File "dist-packages/varnish.py", line 152, in __init__
# self._setup_mmaudio()
# File "dist-packages/varnish/varnish.py", line 165, in _setup_mmaudio
# net.load_weights(torch.load(model.model_path, map_location=self.device, weights_only=False))
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# File "dist-packages/torch/serialization.py", line 1384, in load
# return _legacy_load(
# ^^^^^^^^^^^^^
# File "dist-packages/torch/serialization.py", line 1628, in _legacy_load
# magic_number = pickle_module.load(f, **pickle_load_args)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# _pickle.UnpicklingError: invalid load key, '<'.
enable_mmaudio=False,
)
async def process_frames(
self,
frames: torch.Tensor,
config: GenerationConfig
) -> tuple[str, dict]:
"""Post-process generated frames using Varnish
Args:
frames: Generated video frames tensor
config: Generation configuration
Returns:
Tuple of (video data URI, metadata dictionary)
"""
try:
# Process video with Varnish
result = await self.varnish(
                input_data=frames, # note: this may contain e.g. 97 frames, which will get doubled if double_num_frames is True
                fps=config.fps, # FPS of the final output video; Varnish can use it to compute a clip's duration, e.g. (num_frames * factor) / fps
double_num_frames=config.double_num_frames, # if True, the number of frames will be multiplied by 2 using RIFE
super_resolution=config.super_resolution, # if True, the resolution will be multiplied by 2 using Real_ESRGAN
grain_amount=config.grain_amount,
enable_audio=config.enable_audio,
audio_prompt=config.audio_prompt,
audio_negative_prompt=config.audio_negative_prompt,
)
# Convert to data URI
video_uri = await result.write(
type="data-uri",
quality=17
)
# Collect metadata
metadata = {
"width": result.metadata.width,
"height": result.metadata.height,
"num_frames": result.metadata.frame_count,
"fps": result.metadata.fps,
"duration": result.metadata.duration,
"seed": config.seed,
}
return video_uri, metadata
        except Exception as e:
            logger.error(f"Error in process_frames: {e}")
            raise RuntimeError(f"Failed to process frames: {e}") from e
def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
"""Process incoming requests for video generation
Args:
data: Request data containing:
- inputs (dict): Dictionary containing input, which can be either "prompt" (text field) or "image" (input image)
- parameters (dict):
                    - prompt (required, string): list of concepts to keep in the video.
                    - negative_prompt (optional, string): list of concepts to ignore in the video.
                    - width (optional, int, default to 768): width, or horizontal size in pixels.
                    - height (optional, int, default to 416): height, or vertical size in pixels.
                    - num_frames (optional, int, default to 113): the number of frames; must be a multiple of 8, plus 1 frame.
                    - guidance_scale (optional, float, default to 5.0): guidance scale
                    - num_inference_steps (optional, int, default to 30): number of inference steps
                    - seed (optional, int, default to -1): random number generator seed; -1 means a random seed.
                    - fps (optional, int, default to 30): FPS of the final video
                    - double_num_frames (optional, bool): if enabled, the number of frames will be multiplied by 2 using RIFE
                    - super_resolution (optional, bool): if enabled, the resolution will be multiplied by 2 using Real_ESRGAN
                    - grain_amount (optional, float): amount of film grain to add to the output video
                    - enable_audio (optional, bool): automatically generate an audio track
                    - audio_prompt (optional, str): prompt to use for the audio generation (concepts to add)
                    - audio_negative_prompt (optional, str): negative prompt to use for the audio generation (concepts to ignore)
Returns:
Dictionary containing:
- video: Base64 encoded MP4 data URI
- content-type: MIME type
- metadata: Generation metadata
"""
inputs = data.get("inputs", dict())
input_prompt = inputs.get("prompt", "")
input_image = inputs.get("image")
params = data.get("parameters", dict())
if not input_prompt:
raise ValueError("The prompt should not be empty")
logger.info(f"Prompt: {input_prompt}")
logger.info(f"Raw parameters:")
pprint.pprint(params)
# Create and validate configuration
config = GenerationConfig(
# general content settings
prompt=input_prompt,
negative_prompt=params.get("negative_prompt", GenerationConfig.negative_prompt),
# video model settings (will be used during generation of the initial raw video clip)
width=params.get("width", GenerationConfig.width),
height=params.get("height", GenerationConfig.height),
num_frames=params.get("num_frames", GenerationConfig.num_frames),
guidance_scale=params.get("guidance_scale", GenerationConfig.guidance_scale),
num_inference_steps=params.get("num_inference_steps", GenerationConfig.num_inference_steps),
# reproducible generation settings
seed=params.get("seed", GenerationConfig.seed),
# varnish settings (will be used for post-processing after the raw video clip has been generated)
            fps=params.get("fps", GenerationConfig.fps), # FPS of the final video (only applied at the very end, when converting to mp4)
double_num_frames=params.get("double_num_frames", GenerationConfig.double_num_frames), # if True, the number of frames will be multiplied by 2 using RIFE
super_resolution=params.get("super_resolution", GenerationConfig.super_resolution), # if True, the resolution will be multiplied by 2 using Real_ESRGAN
grain_amount=params.get("grain_amount", GenerationConfig.grain_amount),
enable_audio=params.get("enable_audio", GenerationConfig.enable_audio),
audio_prompt=params.get("audio_prompt", GenerationConfig.audio_prompt),
audio_negative_prompt=params.get("audio_negative_prompt", GenerationConfig.audio_negative_prompt),
).validate_and_adjust()
logger.info(f"Global request settings:")
pprint.pprint(config)
try:
with torch.no_grad():
# Set random seeds
random.seed(config.seed)
np.random.seed(config.seed)
generator = torch.manual_seed(config.seed)
# Prepare generation parameters for the video model (we omit params that are destined to Varnish, or things like the seed which is set externally)
generation_kwargs = {
# general content settings
"prompt": config.prompt,
"negative_prompt": config.negative_prompt,
# video model settings (will be used during generation of the initial raw video clip)
"width": config.width,
"height": config.height,
"num_frames": config.num_frames,
"guidance_scale": config.guidance_scale,
"num_inference_steps": config.num_inference_steps,
# constants
"output_type": "pt",
"generator": generator
}
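                # note: output_type="pt" keeps the frames as torch tensors,
                # which is what Varnish expects downstream in process_frames.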
#logger.info(f"Video model generation settings:")
#pprint.pprint(generation_kwargs)
# Check if image-to-video generation is requested
if input_image:
# Process base64 image
                    if input_image.startswith('data:'):
                        input_image = input_image.split(',', 1)[1]
                    image_bytes = base64.b64decode(input_image)
image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
generation_kwargs["image"] = image
frames = self.image_to_video(**generation_kwargs).frames
else:
frames = self.text_to_video(**generation_kwargs).frames
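                # Run the async Varnish post-processing from this synchronous
                # entrypoint, reusing the current event loop when one exists.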
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
video_uri, metadata = loop.run_until_complete(self.process_frames(frames, config))
return {
"video": video_uri,
"content-type": "video/mp4",
"metadata": metadata
}
        except Exception as e:
            message = f"Error generating video ({e})\n{traceback.format_exc()}"
            logger.error(message)
            raise RuntimeError(message) from e
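# --- Local usage sketch (illustrative, not part of the endpoint contract) ---
# A minimal smoke test, assuming the weights live under "/repository" as they
# do on Hugging Face Inference Endpoints, and that a CUDA device is available.
# The payload shape mirrors the __call__ docstring above.
if __name__ == "__main__":
    handler = EndpointHandler(model_path="/repository")
    response = handler({
        "inputs": {"prompt": "a red fox running through fresh snow, cinematic"},
        "parameters": {
            "width": 768,
            "height": 416,
            "num_frames": 113,  # (8 * 14) + 1
            "seed": 42,
        },
    })
    print(response["content-type"])
    print(response["metadata"])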