import os, json, re, sys, subprocess, gc, tqdm
import math
import time
import numpy as np
import random
import spaces
import threading
import gradio as gr
from PIL import Image, ImageOps
from moviepy import VideoFileClip
from datetime import datetime, timedelta
from huggingface_hub import hf_hub_download, snapshot_download, login

HF_TOKEN = os.environ.get('HF_TOKEN')
login(token=HF_TOKEN)

import insightface
from insightface.app import FaceAnalysis
from facexlib.parsing import init_parsing_model
from facexlib.utils.face_restoration_helper import FaceRestoreHelper
import torch
from diffusers import CogVideoXDPMScheduler
from diffusers.utils import load_image
from diffusers.image_processor import VaeImageProcessor
from diffusers.training_utils import free_memory

from util.utils import *
from util.rife_model import load_rife_model, rife_inference_with_latents
from models.utils import process_face_embeddings
from models.transformer_consisid import ConsisIDTransformer3DModel
from models.pipeline_consisid import ConsisIDPipeline
from models.eva_clip import create_model_and_transforms
from models.eva_clip.constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
from models.eva_clip.utils_qformer import resize_numpy_image_long

os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

device = "cuda" if torch.cuda.is_available() else "cpu"

hf_hub_download(repo_id="ai-forever/Real-ESRGAN", filename="RealESRGAN_x4.pth", local_dir="model_real_esran")
snapshot_download(repo_id="AlexWortega/RIFE", local_dir="model_rife")
snapshot_download(repo_id="BestWishYsh/ConsisID-preview", local_dir="BestWishYsh/ConsisID-preview")

model_path = "BestWishYsh/ConsisID-preview"
lora_path = None
lora_rank = 128
dtype = torch.bfloat16

if os.path.exists(os.path.join(model_path, "transformer_ema")):
    subfolder = "transformer_ema"
else:
    subfolder = "transformer"

transformer = ConsisIDTransformer3DModel.from_pretrained_cus(model_path, subfolder=subfolder)
scheduler = CogVideoXDPMScheduler.from_pretrained(model_path, subfolder="scheduler")

try:
    is_kps = transformer.config.is_kps
except Exception:
    is_kps = False
# 1. load face helper models
face_helper = FaceRestoreHelper(
    upscale_factor=1,
    face_size=512,
    crop_ratio=(1, 1),
    det_model='retinaface_resnet50',
    save_ext='png',
    device=device,
    model_rootpath=os.path.join(model_path, "face_encoder"),
)
face_helper.face_parse = None
face_helper.face_parse = init_parsing_model(model_name='bisenet', device=device, model_rootpath=os.path.join(model_path, "face_encoder"))
face_helper.face_det.eval()
face_helper.face_parse.eval()

model, _, _ = create_model_and_transforms('EVA02-CLIP-L-14-336', os.path.join(model_path, "face_encoder", "EVA02_CLIP_L_336_psz14_s6B.pt"), force_custom_clip=True)
face_clip_model = model.visual
face_clip_model.eval()

eva_transform_mean = getattr(face_clip_model, 'image_mean', OPENAI_DATASET_MEAN)
eva_transform_std = getattr(face_clip_model, 'image_std', OPENAI_DATASET_STD)
if not isinstance(eva_transform_mean, (list, tuple)):
    eva_transform_mean = (eva_transform_mean,) * 3
if not isinstance(eva_transform_std, (list, tuple)):
    eva_transform_std = (eva_transform_std,) * 3

face_main_model = FaceAnalysis(name='antelopev2', root=os.path.join(model_path, "face_encoder"), providers=['CUDAExecutionProvider'])
handler_ante = insightface.model_zoo.get_model(f'{model_path}/face_encoder/models/antelopev2/glintr100.onnx', providers=['CUDAExecutionProvider'])
face_main_model.prepare(ctx_id=0, det_size=(640, 640))
handler_ante.prepare(ctx_id=0)

face_clip_model.to(device, dtype=dtype)
face_helper.face_det.to(device)
face_helper.face_parse.to(device)
transformer.to(device, dtype=dtype)
# free_memory()

pipe = ConsisIDPipeline.from_pretrained(model_path, transformer=transformer, scheduler=scheduler, torch_dtype=dtype)
# If you're using LoRA, add this code
if lora_path:
    pipe.load_lora_weights(lora_path, weight_name="pytorch_lora_weights.safetensors", adapter_name="test_1")
    pipe.fuse_lora(lora_scale=1 / lora_rank)

scheduler_args = {}
if "variance_type" in pipe.scheduler.config:
    variance_type = pipe.scheduler.config.variance_type
    if variance_type in ["learned", "learned_range"]:
        variance_type = "fixed_small"
    scheduler_args["variance_type"] = variance_type
pipe.scheduler = CogVideoXDPMScheduler.from_config(pipe.scheduler.config, **scheduler_args)
pipe.to(device)

os.makedirs("./output", exist_ok=True)
os.makedirs("./gradio_tmp", exist_ok=True)

upscale_model = load_sd_upscale("model_real_esran/RealESRGAN_x4.pth", device)
frame_interpolation_model = load_rife_model("model_rife")


def convert_to_gif(video_path):
    clip = VideoFileClip(video_path)
    gif_path = video_path.replace(".mp4", ".gif")
    clip.write_gif(gif_path, fps=8)
    return gif_path


def delete_old_files():
    while True:
        now = datetime.now()
        cutoff = now - timedelta(minutes=10)
        directories = ["./output", "./gradio_tmp"]
        for directory in directories:
            for filename in os.listdir(directory):
                file_path = os.path.join(directory, filename)
                if os.path.isfile(file_path):
                    file_mtime = datetime.fromtimestamp(os.path.getmtime(file_path))
                    if file_mtime < cutoff:
                        os.remove(file_path)
        time.sleep(600)


@spaces.GPU(duration=70)
def plex(prompt, image_input, seed_value, scale_status, rife_status, progress=gr.Progress(track_tqdm=True)):
    seed = seed_value
    if seed == -1:
        seed = random.randint(0, 2**8 - 1)

    id_image = np.array(ImageOps.exif_transpose(Image.fromarray(image_input)).convert("RGB"))
    id_image = resize_numpy_image_long(id_image, 1024)
    id_cond, id_vit_hidden, align_crop_face_image, face_kps = process_face_embeddings(
        face_helper,
        face_clip_model,
        handler_ante,
        eva_transform_mean,
        eva_transform_std,
        face_main_model,
        device,
        dtype,
        id_image,
        original_id_image=id_image,
        is_align_face=True,
        cal_uncond=False,
    )
    if is_kps:
        kps_cond = face_kps
    else:
        kps_cond = None

    tensor = align_crop_face_image.cpu().detach()
    tensor = tensor.squeeze()
    tensor = tensor.permute(1, 2, 0)
    tensor = tensor.numpy() * 255
    tensor = tensor.astype(np.uint8)
    image = ImageOps.exif_transpose(Image.fromarray(tensor))

    prompt = prompt.strip('"')
    generator = torch.Generator(device).manual_seed(seed) if seed else None

    video_pt = pipe(
        prompt=prompt,
        image=image,
        num_videos_per_prompt=1,
        num_inference_steps=4,
        num_frames=49,
        use_dynamic_cfg=False,
        guidance_scale=7.0,
        generator=generator,
        id_vit_hidden=id_vit_hidden,
        id_cond=id_cond,
        kps_cond=kps_cond,
        output_type="pt",
    ).frames

    latents = video_pt
    # free_memory()

    if scale_status:
        latents = upscale_batch_and_concatenate(upscale_model, latents, device)
    if rife_status:
        latents = rife_inference_with_latents(frame_interpolation_model, latents)

    batch_size = latents.shape[0]
    batch_video_frames = []
    for batch_idx in range(batch_size):
        pt_image = latents[batch_idx]
        pt_image = torch.stack([pt_image[i] for i in range(pt_image.shape[0])])
        image_np = VaeImageProcessor.pt_to_numpy(pt_image)
        image_pil = VaeImageProcessor.numpy_to_pil(image_np)
        batch_video_frames.append(image_pil)

    video_path = save_video(batch_video_frames[0], fps=math.ceil((len(batch_video_frames[0]) - 1) / 6))
    video_update = gr.update(visible=True, value=video_path)
    gif_path = convert_to_gif(video_path)
    gif_update = gr.update(visible=True, value=gif_path)
    seed_update = gr.update(visible=True, value=seed)

    return video_path, video_update, gif_update, seed_update
    # return video_pt, seed


# threading.Thread(target=delete_old_files, daemon=True).start()

examples_images = [
    [
        "asserts/example_images/1.png",
        "A woman adorned with a delicate flower crown, is standing amidst a field of gently swaying wildflowers. Her eyes sparkle with a serene gaze, and a faint smile graces her lips, suggesting a moment of peaceful contentment. The shot is framed from the waist up, highlighting the gentle breeze lightly tousling her hair. The background reveals an expansive meadow under a bright blue sky, capturing the tranquility of a sunny afternoon.",
    ],
    [
        "asserts/example_images/2.png",
        "The video captures a boy walking along a city street, filmed in black and white on a classic 35mm camera. His expression is thoughtful, his brow slightly furrowed as if he's lost in contemplation. The film grain adds a textured, timeless quality to the image, evoking a sense of nostalgia. Around him, the cityscape is filled with vintage buildings, cobblestone sidewalks, and softly blurred figures passing by, their outlines faint and indistinct. Streetlights cast a gentle glow, while shadows play across the boy's path, adding depth to the scene. The lighting highlights the boy's subtle smile, hinting at a fleeting moment of curiosity. The overall cinematic atmosphere, complete with classic film still aesthetics and dramatic contrasts, gives the scene an evocative and introspective feel.",
    ],
    [
        "asserts/example_images/3.png",
        "The video depicts a man sitting at an office desk, engaged in his work. He is dressed in a formal suit and appears to be focused on his computer screen. The office environment is well-organized, with shelves filled with binders and other office supplies neatly arranged. The man is holding a red cup, possibly containing a beverage, which he drinks from before setting it down on the desk. He then proceeds to type on the keyboard, indicating that he is working on something on his computer. The overall atmosphere of the video suggests a professional setting where the man is diligently working on his tasks.",
    ],
]

with gr.Blocks() as demo:
    gr.Markdown("""
ConsisID Space 🤗
🤗 Model Hub | 📚 Dataset | 🌐 GitHub | 📝 Page | 📜 arXiv
If the Space is too busy, duplicate it to use privately
⚠️ This demo is for academic research and experimental use only.
""") with gr.Row(): with gr.Column(): with gr.Accordion("IPT2V: Face Input", open=True): image_input = gr.Image(label="Input Image (should contain clear face)") prompt = gr.Textbox(label="Prompt (Less than 200 Words)", placeholder="Enter your prompt here", lines=5) with gr.Accordion("Examples", open=False): examples_component_images = gr.Examples( examples_images, inputs=[image_input, prompt], cache_examples=False, ) with gr.Group(): with gr.Column(): with gr.Row(): seed_param = gr.Number( label="Inference Seed (Enter a positive number, -1 for random)", value=42 ) with gr.Row(): enable_scale = gr.Checkbox(label="Super-Resolution (720 Γ— 480 -> 2880 Γ— 1920)", value=False) enable_rife = gr.Checkbox(label="Frame Interpolation (8fps -> 16fps)", value=False) gr.Markdown( "✨In this demo, we use [RIFE](https://github.com/hzwer/ECCV2022-RIFE) for frame interpolation and [Real-ESRGAN](https://github.com/xinntao/Real-ESRGAN) for upscaling(Super-Resolution)." ) generate_button = gr.Button("🎬 Generate Video") with gr.Column(): video_output = gr.Video(label="ConsisID Generate Video", width=720, height=480) with gr.Row(): download_video_button = gr.File(label="πŸ“₯ Download Video", visible=False) download_gif_button = gr.File(label="πŸ“₯ Download GIF", visible=False) seed_text = gr.Number(label="Seed Used for Video Generation", visible=False) gr.Markdown("""
🎥 Video Gallery

The video features a woman in exquisite hybrid armor adorned with iridescent gemstones, standing amidst gently falling cherry blossoms. Her piercing yet serene gaze hints at quiet determination, as a breeze catches a loose strand of her hair. She stands in a tranquil courtyard framed by moss-covered stone walls and wooden arches, with blossoms casting soft shadows on the ground. The petals swirl around her, adding a dreamlike quality, while the blurred backdrop emphasizes her poised figure. The scene conveys elegance, strength, and tranquil readiness, capturing a moment of peace before an upcoming challenge.

The video features a baby wearing a bright superhero cape, standing confidently with arms raised in a powerful pose. The baby has a determined look on their face, with eyes wide and lips pursed in concentration, as if ready to take on a challenge. The setting appears playful, with colorful toys scattered around and a soft rug underfoot, while sunlight streams through a nearby window, highlighting the fluttering cape and adding to the impression of heroism. The overall atmosphere is lighthearted and fun, with the baby's expressions capturing a mix of innocence and an adorable attempt at bravery, as if truly ready to save the day.

The video features a man standing next to an airplane, engaged in a conversation on his cell phone. He is wearing sunglasses and a black top, and he appears to be talking seriously. The airplane has a green stripe running along its side, and there is a large engine visible behind him. The man seems to be standing near the entrance of the airplane, possibly preparing to board or just having disembarked. The setting suggests that he might be at an airport or a private airfield. The overall atmosphere of the video is professional and focused, with the man's attire and the presence of the airplane indicating a business or travel context.

The video features a woman with blonde hair standing on a beach near the water's edge. She is wearing a black swimsuit and appears to be enjoying her time by the sea. The sky above is clear with some clouds, and the ocean waves gently lap against the shore. The woman seems to be holding something white in her hand, possibly a piece of driftwood or a small object found on the beach. The overall atmosphere of the video is serene and relaxing, capturing the beauty of nature and the simple pleasure of being by the ocean.

The video features a man sitting in a red armchair, enjoying a cup of coffee or tea. He is dressed in a light-colored outfit and has long dark hair. The setting appears to be indoors, with large windows providing a view of a misty or foggy coastal landscape outside. The room has a modern design with geometric structures visible in the background. There is a small round table next to the armchair, also holding a cup. The overall atmosphere suggests a calm and serene moment, possibly during a cold or rainy day by the sea.

The video shows a young boy sitting at a table, eating a piece of food. He appears to be enjoying his meal, as he takes a bite and chews it. The boy is wearing a blue shirt and has short hair. The background is dark, with some light coming from the left side of the frame. There is a straw visible on the right side of the frame, suggesting that there may be a drink next to the boy's plate. The overall atmosphere of the video seems casual and relaxed, with the focus on the boy's enjoyment of his food.

The video captures a boy walking along a city street, filmed in black and white on a classic 35mm camera. His expression is thoughtful, his brow slightly furrowed as if he's lost in contemplation. The film grain adds a textured, timeless quality to the image, evoking a sense of nostalgia. Around him, the cityscape is filled with vintage buildings, cobblestone sidewalks, and softly blurred figures passing by, their outlines faint and indistinct. Streetlights cast a gentle glow, while shadows play across the boy's path, adding depth to the scene. The lighting highlights the boy's subtle smile, hinting at a fleeting moment of curiosity. The overall cinematic atmosphere, complete with classic film still aesthetics and dramatic contrasts, gives the scene an evocative and introspective feel.

The video features a young man standing outdoors in a snowy park. He is wearing a colorful winter jacket with a floral pattern and a white knit hat. The background shows a snowy landscape with trees, benches, and a metal fence. The ground is covered in snow, and there is a light snowfall in the air. The man appears to be enjoying the winter weather, as he smiles and gives a thumbs-up gesture towards the camera. The overall atmosphere of the video is cheerful and festive, capturing the beauty of a snowy day in a park.

""") generate_button.click( fn=plex, inputs=[prompt, image_input, seed_param, enable_scale, enable_rife], outputs=[video_output, download_video_button, download_gif_button, seed_text], ) demo.queue(max_size=15) demo.launch(debug=True)