import gradio as gr
from image_to_video import model_i2v_fun, get_input, auto_inpainting, setup_seed
from omegaconf import OmegaConf
import torch
from diffusers.utils.import_utils import is_xformers_available
import torchvision
from utils import mask_generation_before
import os
import cv2

# Download the pre-trained SEINE checkpoint into pre-trained/.
# Note: wget's -o only redirects the log file; -O names the downloaded file.
os.makedirs('pre-trained', exist_ok=True)
os.system('wget "https://huggingface.co/Vchitect/SEINE/resolve/main/seine.pt?download=true" -O pre-trained/seine.pt')

config_path = "./configs/sample_i2v.yaml"
args = OmegaConf.load(config_path)
device = "cuda" if torch.cuda.is_available() else "cpu"

css = """
h1 {
  text-align: center;
}
#component-0 {
  max-width: 730px;
  margin: auto;
}
"""


def infer(prompt, image_inp, seed_inp, ddim_steps):
    setup_seed(seed_inp)
    args.num_sampling_steps = ddim_steps
    # Check the return type of the image input first.
    print(prompt, seed_inp, ddim_steps, type(image_inp))

    # Take the sampling resolution directly from the input image
    # (previously restricted to fixed sizes: 512x512, 320x512, 292x512).
    img = cv2.imread(image_inp)
    new_size = [img.shape[0], img.shape[1]]  # (height, width)
    args.image_size = new_size

    vae, model, text_encoder, diffusion = model_i2v_fun(args)
    vae.to(device)
    model.to(device)
    text_encoder.to(device)

    if args.use_fp16:
        vae.to(dtype=torch.float16)
        model.to(dtype=torch.float16)
        text_encoder.to(dtype=torch.float16)

    if args.enable_xformers_memory_efficient_attention and device == "cuda":
        if is_xformers_available():
            model.enable_xformers_memory_efficient_attention()
        else:
            raise ValueError("xformers is not available. Make sure it is installed correctly")

    # Build the conditioning clip: keep the reserved input frame(s) and
    # zero out the frames that the model should generate.
    video_input, reserve_frames = get_input(image_inp, args)
    video_input = video_input.to(device).unsqueeze(0)
    mask = mask_generation_before(args.mask_type, video_input.shape, video_input.dtype, device)
    masked_video = video_input * (mask == 0)

    prompt = prompt + args.additional_prompt
    video_clip = auto_inpainting(args, video_input, masked_video, mask, prompt,
                                 vae, text_encoder, diffusion, model, device)

    # Map frames from [-1, 1] to uint8 in (T, H, W, C) layout and write an 8 fps MP4.
    video_ = ((video_clip * 0.5 + 0.5) * 255).add_(0.5).clamp_(0, 255).to(dtype=torch.uint8).cpu().permute(0, 2, 3, 1)
    torchvision.io.write_video(os.path.join(args.save_img_path, prompt + '.mp4'), video_, fps=8)
    return os.path.join(args.save_img_path, prompt + '.mp4')


def clean():
    # Reset the output video component (Gradio 3.x update API).
    return gr.Video.update(value=None)


title = """
Apply SEINE to generate a video