import gradio as gr
# import gradio.helpers
import torch
import os
from glob import glob
from pathlib import Path
from typing import Optional
import base64
from io import BytesIO
import tempfile
import numpy as np
import cv2
import subprocess

from DeepCache import DeepCacheSDHelper
from PIL import Image
from diffusers.utils import load_image, export_to_video
from pipeline import StableVideoDiffusionPipeline

import random
from safetensors import safe_open
from lcm_scheduler import AnimateLCMSVDStochasticIterativeScheduler

SECRET_TOKEN = os.getenv('SECRET_TOKEN', 'default_secret')

# is that 8 or 25?
hardcoded_fps = 25
hardcoded_duration_sec = 3


def get_safetensors_files():
    models_dir = "./safetensors"
    safetensors_files = [
        f for f in os.listdir(models_dir) if f.endswith(".safetensors")
    ]
    return safetensors_files


def model_select(selected_file):
    print("load model weights", selected_file)
    pipe.unet.cpu()
    file_path = os.path.join("./safetensors", selected_file)
    state_dict = {}
    with safe_open(file_path, framework="pt", device="cpu") as f:
        for key in f.keys():
            state_dict[key] = f.get_tensor(key)
    missing, unexpected = pipe.unet.load_state_dict(state_dict, strict=True)
    pipe.unet.cuda()
    del state_dict
    return


def decode_data_uri_to_image(data_uri):
    # parse the data uri
    header, encoded = data_uri.split(",", 1)
    data = base64.b64decode(encoded)
    img = Image.open(BytesIO(data))
    return img


# ----------------------------- FRAME INTERPOLATION ---------------------------------
# we cannot afford to use AI-based algorithms such as FILM or ST-MFNet,
# those are way too slow for AiTube which needs things to be as fast as possible
# ------------------------------------------------------------------------------------
def interpolate_video_frames(
        input_file_path,
        output_file_path,
        output_fps=hardcoded_fps,
        desired_duration=hardcoded_duration_sec,
        original_duration=hardcoded_duration_sec,
        output_width=None,
        output_height=None,
        use_cuda=False,  # this requires FFmpeg to have been compiled with CUDA support (to try - I'm not sure the Hugging Face image has that by default)
        verbose=False):

    scale_factor = desired_duration / original_duration

    filters = []

    # Scaling if dimensions are provided
    # note: upscaling produces disastrous results,
    # and it will double the compute time.
    # I think that's either because we are not hardware-accelerated,
    # or because of the interpolation done after it, which then becomes more computationally intensive
    if output_width and output_height:
        filters.append(f'scale={output_width}:{output_height}')

    # note: from what I can tell, using a small macroblock is important for us,
    # since the video resolution is very small (usually 512x288px)
    interpolation_filter = f'minterpolate=mi_mode=mci:mc_mode=obmc:me=hexbs:vsbmc=1:mb_size=4:fps={output_fps}:scd=none,setpts={scale_factor}*PTS'
    # - `mi_mode=mci`: Specifies motion compensated interpolation.
    # - `mc_mode=obmc`: Overlapped block motion compensation is used.
    # - `me=hexbs`: Hexagon-based search (motion estimation method).
    # - `vsbmc=1`: Variable-size block motion compensation is enabled.
    # - `mb_size=4`: Sets the macroblock size.
    # - `fps={output_fps}`: Defines the output frame rate.
    # - `scd=none`: Disables scene change detection entirely.
    # - `setpts={scale_factor}*PTS`: Adjusts for the stretching of the video duration.
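    # For illustration: with the defaults above (output_fps=25 and desired_duration == original_duration,
    # so scale_factor is 1.0), the filter string resolves to:
    #   minterpolate=mi_mode=mci:mc_mode=obmc:me=hexbs:vsbmc=1:mb_size=4:fps=25:scd=none,setpts=1.0*PTS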
    # Frame interpolation setup
    filters.append(interpolation_filter)

    # Combine all filters into a single filter complex
    filter_complex = ','.join(filters)

    cmd = [
        'ffmpeg',
        '-i', input_file_path,
    ]

    # not supported by the current image, we will have to build it
    if use_cuda:
        cmd.extend(['-hwaccel', 'cuda', '-hwaccel_output_format', 'cuda'])

    cmd.extend([
        '-filter:v', filter_complex,
        '-r', str(output_fps),
        output_file_path
    ])

    # Adjust the log level based on the verbosity input
    if not verbose:
        cmd.insert(1, '-loglevel')
        cmd.insert(2, 'error')

    # Logging for debugging if verbose
    if verbose:
        print("output_fps:", output_fps)
        print("desired_duration:", desired_duration)
        print("original_duration:", original_duration)
        print("cmd:", cmd)

    try:
        subprocess.run(cmd, check=True)
        return output_file_path
    except subprocess.CalledProcessError as e:
        print("Failed to interpolate video. Error:", e)
        return input_file_path  # In case of error, return original path


# ----------------------------------- VIDEO ENCODING ---------------------------------
# The Diffusers utils hardcode MP4V as a codec which is not supported by all browsers.
# This is a critical issue for AiTube so we are forced to implement our own routine.
# -------------------------------------------------------------------------------------
def export_to_video_file(video_frames, output_video_path=None, fps=hardcoded_fps):
    if output_video_path is None:
        output_video_path = tempfile.NamedTemporaryFile(suffix=".webm").name

    if isinstance(video_frames[0], np.ndarray):
        video_frames = [(frame * 255).astype(np.uint8) for frame in video_frames]
    elif isinstance(video_frames[0], Image.Image):
        video_frames = [np.array(frame) for frame in video_frames]

    # Use VP9 codec - don't freak out: yes, this will throw an exception, but this still works
    # https://stackoverflow.com/a/61116338
    # I suspect there is a bug somewhere and the actual hex code should be different
    fourcc = cv2.VideoWriter_fourcc(*'VP90')

    h, w, c = video_frames[0].shape
    video_writer = cv2.VideoWriter(output_video_path, fourcc, fps, (w, h), True)

    for frame in video_frames:
        # Ensure the video frame is in the correct color format
        img = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        video_writer.write(img)
    video_writer.release()

    return output_video_path
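
# Illustrative usage only (nothing here is executed at import time): encode generated frames
# to WebM with the helper above, then retime the result to the target fps/duration with the
# FFmpeg minterpolate routine. The file names are hypothetical.
#
#   raw_path = export_to_video_file(frames, "raw.webm", fps=hardcoded_fps)
#   final_path = interpolate_video_frames(raw_path, "final.webm",
#                                         output_fps=hardcoded_fps,
#                                         desired_duration=hardcoded_duration_sec,
#                                         original_duration=hardcoded_duration_sec)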

noise_scheduler = AnimateLCMSVDStochasticIterativeScheduler(
    num_train_timesteps=40,
    sigma_min=0.002,
    sigma_max=700.0,
    sigma_data=1.0,
    s_noise=1.0,
    rho=7,
    clip_denoised=False,
)

pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt",
    scheduler=noise_scheduler,
    torch_dtype=torch.float16,
    variant="fp16",
)
pipe.to("cuda")
pipe.enable_model_cpu_offload()  # for smaller cost
model_select("AnimateLCM-SVD-xt-1.1.safetensors")
# pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)  # for faster inference

helper = DeepCacheSDHelper(pipe=pipe)
helper.set_params(
    # cache_interval is the frequency of feature caching, specified as the number of steps between each cache operation.
    # With AnimateDiff this seems to have large effects, so we cannot use large values:
    # even with cache_interval=3 I notice a big degradation in quality.
    cache_interval=2,

    # cache_branch_id identifies which branch of the network (ordered from the shallowest to the deepest layer)
    # is responsible for executing the caching processes.
    # Note Julian: I should create my own benchmarks for this
    cache_branch_id=0,

    # Opting for a lower cache_branch_id or a larger cache_interval can lead to faster inference speed
    # at the expense of reduced image quality
    # (ablation experiments of these two hyperparameters can be found in the paper).
)
helper.enable()

max_64_bit_int = 2**63 - 1


def sample(
    secret_token: str,
    input_image_base64: str,
    seed: Optional[int] = 42,
    randomize_seed: bool = False,
    motion_bucket_id: int = 80,
    fps_id: int = 8,
    max_guidance_scale: float = 1.2,
    min_guidance_scale: float = 1,
    width: int = 768,
    height: int = 384,
    num_inference_steps: int = 4,
    decoding_t: int = 4,  # Number of frames decoded at a time! This eats most VRAM. Reduce if necessary.
    output_folder: str = "outputs_gradio",
):
    if secret_token != SECRET_TOKEN:
        raise gr.Error(
            'Invalid secret token. Please fork the original space if you want to use it for yourself.')

    image = decode_data_uri_to_image(input_image_base64)

    print(f"seed={seed}\nrandomize_seed={randomize_seed}\nmotion_bucket_id={motion_bucket_id}\nfps_id={fps_id}\nmax_guidance_scale={max_guidance_scale}\nmin_guidance_scale={min_guidance_scale}\nwidth={width}\nheight={height}\nnum_inference_steps={num_inference_steps}\ndecoding_t={decoding_t}")

    if image.mode == "RGBA":
        image = image.convert("RGB")

    if randomize_seed:
        seed = random.randint(0, max_64_bit_int)
    generator = torch.manual_seed(seed)

    os.makedirs(output_folder, exist_ok=True)
    base_count = len(glob(os.path.join(output_folder, "*.mp4")))
    video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")

    with torch.autocast("cuda"):
        frames = pipe(
            image,
            decode_chunk_size=decoding_t,
            generator=generator,
            motion_bucket_id=motion_bucket_id,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            min_guidance_scale=min_guidance_scale,
            max_guidance_scale=max_guidance_scale,
        ).frames[0]
    export_to_video(frames, video_path, fps=fps_id)
    torch.manual_seed(seed)

    # Read the content of the video file and encode it to base64
    with open(video_path, "rb") as video_file:
        video_base64 = base64.b64encode(video_file.read()).decode('utf-8')

    # Prepend the appropriate data URI header with MIME type
    return 'data:video/mp4;base64,' + video_base64
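
# Illustrative note: sample() returns a base64 data URI, so a caller can turn the response
# back into an .mp4 file along these lines (variable names are hypothetical):
#
#   header, encoded = result.split(",", 1)
#   with open("result.mp4", "wb") as f:
#       f.write(base64.b64decode(encoded))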

with gr.Blocks() as demo:
    gr.HTML("""
        This space is a headless component of the cloud rendering engine used by AiTube.
        It is not available for public use, but you can use the original space.