import argparse
import json
import os
import random
import sys

sys.path.insert(0, '../src')

import torch
from einops import rearrange
from pytorch_lightning import seed_everything
from torch import autocast
from scripts.sampling.util import (
    create_model,
    init_sampling,
    load_video_keyframes,
    model_load_ckpt,
    perform_save_locally_video,
)
from sgm.util import append_dims
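
# Batch text-guided video-to-video editing driven by a jsonl job list.
# Example invocation (script name and paths are illustrative):
#   python sample_tv2v_jsonl.py --jsonl_path jobs.jsonl --save_root outputs
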
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument(
        "--config_path",
        type=str,
        default="configs/inference_ccedit/keyframe_no2ndca_depthmidas.yaml",
    )
    parser.add_argument(
        "--ckpt_path",
        type=str,
        default="models/tv2v-no2ndca-depthmidas.ckpt",
    )
    parser.add_argument(
        "--use_default", action="store_true", help="use the default ckpt first"
    )
    parser.add_argument(
        "--basemodel_path",
        type=str,
        default="",
        help="load a new base model instead of the original sd-1.5",
    )
    parser.add_argument("--basemodel_listpath", type=str, default="")
    parser.add_argument("--lora_path", type=str, default="")
    parser.add_argument("--vae_path", type=str, default="")
    parser.add_argument(
        "--jsonl_path",
        type=str,
        required=True,
        help="path to a jsonl file containing video paths, prompts, and edit prompts",
    )
    parser.add_argument("--save_root", type=str, default="outputs")
    parser.add_argument("--H", type=int, default=512)
    parser.add_argument("--W", type=int, default=768)
    parser.add_argument("--original_fps", type=int, default=18)
    parser.add_argument("--target_fps", type=int, default=6)
    parser.add_argument("--num_keyframes", type=int, default=17)
    parser.add_argument("--negative_prompt", type=str, default="ugly, low quality")
    parser.add_argument("--sample_steps", type=int, default=30)
    parser.add_argument("--sampler_name", type=str, default="DPMPP2SAncestralSampler")
    parser.add_argument(
        "--discretization_name", type=str, default="LegacyDDPMDiscretization"
    )
    parser.add_argument("--cfg_scale", type=float, default=7.5)
    parser.add_argument("--prior_coefficient_x", type=float, default=0.0)
    parser.add_argument("--prior_coefficient_noise", type=float, default=1.0)
    parser.add_argument("--sdedit_denoise_strength", type=float, default=0.0)
    parser.add_argument("--num_samples", type=int, default=2)
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument(
        "--disable_check_repeat", action="store_true", help="disable the repeat check"
    )
    parser.add_argument("--lora_strength", type=float, default=0.8)
    parser.add_argument("--save_type", type=str, default="mp4", choices=["gif", "mp4"])
    parser.add_argument(
        "--inpainting_mode", action="store_true", help="inpainting mode"
    )
    args = parser.parse_args()
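
    # Seed all RNGs; a seed of -1 draws a random one.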
    seed = args.seed
    if seed == -1:
        seed = random.randint(0, 1000000)
    seed_everything(seed)
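
    # Instantiate the model from the config and load the checkpoint weights.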
    model = create_model(config_path=args.config_path).to("cuda")
    ckpt_path = args.ckpt_path
    print("--> load ckpt from:", ckpt_path)
    model = model_load_ckpt(model, path=ckpt_path)
    model.eval()
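
    # Each jsonl line describes one editing job, e.g. (hypothetical values):
    # {"video": "dog.mp4", "prompt": "a dog running", "edit_prompt": "watercolor style"}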
    with open(args.jsonl_path, "r") as f:
        lines = f.readlines()
    video_info_list = [json.loads(line) for line in lines]
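
    # Process each video: load keyframes, build conditioning, sample, and save.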
    for video_info in video_info_list:
        video_name = video_info["video"]
        prompt = video_info["prompt"]
        add_prompt = video_info["edit_prompt"]
        # NOTE: the dataset root is hardcoded; adjust it for your environment.
        video_path = os.path.join(
            "/home/wangjuntong/video_editing_dataset/all_sourse", video_name
        )
        save_path = os.path.join(args.save_root, os.path.splitext(video_name)[0])
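
        # Sample keyframes from the source video at the target fps and resize them.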
        keyframes = load_video_keyframes(
            video_path,
            args.original_fps,
            args.target_fps,
            args.num_keyframes,
            (args.H, args.W),
        )
        keyframes = keyframes.unsqueeze(0)
        keyframes = rearrange(keyframes, "b t c h w -> b c t h w").to(model.device)
        control_hint = keyframes
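
        # Conditional batch uses the (edit) prompt; unconditional batch uses the
        # negative prompt. Both share the same control hint (the source frames).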
        batch = {
            "txt": [prompt],
            "control_hint": control_hint,
        }
        negative_prompt = args.negative_prompt
        batch_uc = {
            "txt": [negative_prompt],
            "control_hint": batch["control_hint"].clone(),
        }
        if add_prompt:
            batch["txt"] = [add_prompt + ", " + prompt]
        c, uc = model.conditioner.get_unconditional_conditioning(
            batch_c=batch,
            batch_uc=batch_uc,
        )
        sampling_kwargs = {}
        for k in c:
            if isinstance(c[k], torch.Tensor):
                c[k], uc[k] = map(lambda y: y[k].to(model.device), (c, uc))
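
        # Latent shape: 4 channels, num_keyframes frames, H/8 x W/8 spatial size.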
        shape = (4, args.num_keyframes, args.H // 8, args.W // 8)
        precision_scope = autocast
        with torch.no_grad():
            with precision_scope("cuda"):
                randn = torch.randn(1, *shape).to(model.device)
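
                # Two modes: sample from noise (optionally mixed with the encoded
                # source as a prior), or SDEdit-style partial denoising.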
                if args.sdedit_denoise_strength == 0.0:

                    def denoiser(input, sigma, c):
                        return model.denoiser(
                            model.model, input, sigma, c, **sampling_kwargs
                        )
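
                    # Optionally blend the encoded source latents into the
                    # initial noise to bias sampling toward the source video.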
                    if args.prior_coefficient_x != 0.0:
                        prior = model.encode_first_stage(keyframes)
                        randn = (
                            args.prior_coefficient_x * prior
                            + args.prior_coefficient_noise * randn
                        )
                    sampler = init_sampling(
                        sample_steps=args.sample_steps,
                        sampler_name=args.sampler_name,
                        discretization_name=args.discretization_name,
                        guider_config_target="sgm.modules.diffusionmodules.guiders.VanillaCFGTV2V",
                        cfg_scale=args.cfg_scale,
                    )
                    sampler.verbose = True
                    samples = sampler(denoiser, randn, c, uc=uc)
                else:
                    assert (
                        args.sdedit_denoise_strength > 0.0
                    ), "sdedit_denoise_strength should be positive"
                    assert (
                        args.sdedit_denoise_strength <= 1.0
                    ), "sdedit_denoise_strength should be at most 1.0"
                    assert (
                        args.prior_coefficient_x == 0
                    ), "prior_coefficient_x should be 0 when using sdedit_denoise_strength"
                    denoise_strength = args.sdedit_denoise_strength
                    sampler = init_sampling(
                        sample_steps=args.sample_steps,
                        sampler_name=args.sampler_name,
                        discretization_name=args.discretization_name,
                        guider_config_target="sgm.modules.diffusionmodules.guiders.VanillaCFGTV2V",
                        cfg_scale=args.cfg_scale,
                        img2img_strength=denoise_strength,
                    )
                    sampler.verbose = True
                    z = model.encode_first_stage(keyframes)
                    noise = torch.randn_like(z)
                    sigmas = sampler.discretization(sampler.num_steps).to(z.device)
                    sigma = sigmas[0]
                    print(f"all sigmas: {sigmas}")
                    print(f"noising sigma: {sigma}")
                    noised_z = z + noise * append_dims(sigma, z.ndim)
                    # Rescale so the noised latents match the sampler's expected
                    # input variance at the starting sigma.
                    noised_z = noised_z / torch.sqrt(1.0 + sigmas[0] ** 2.0)

                    def denoiser(x, sigma, c):
                        return model.denoiser(model.model, x, sigma, c)

                    samples = sampler(denoiser, noised_z, cond=c, uc=uc)
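
                # Decode latents to pixels and map from [-1, 1] to [0, 1].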
                samples = model.decode_first_stage(samples)
                samples = (torch.clamp(samples, -1.0, 1.0) + 1.0) / 2.0
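
        # Write the edited keyframes out as a video (gif or mp4).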
        os.makedirs(save_path, exist_ok=True)
        perform_save_locally_video(
            save_path,
            samples,
            args.target_fps,
            args.save_type,
            save_grid=False,
        )
        print(f"Saved video to {save_path}")