|
import os |
|
import cv2 |
|
import torch |
|
import argparse |
|
from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, UniPCMultistepScheduler |
|
from diffusers.utils import load_image |
|
import numpy as np |
|
from PIL import Image |
|
|
|
def split_video_into_frames(video_path, frames_dir):
    """Extract every frame of *video_path* into *frames_dir* as ``frameNNNN.png``.

    Frames are numbered sequentially from 0 with 4-digit zero padding so that
    lexicographic filename order matches playback order.

    Args:
        video_path: Path to the input video file readable by OpenCV.
        frames_dir: Directory to write the frame images to (created if missing).

    Returns:
        The number of frames written.
    """
    os.makedirs(frames_dir, exist_ok=True)
    print("splitting video")
    vidcap = cv2.VideoCapture(video_path)
    count = 0
    try:
        success, image = vidcap.read()
        while success:
            frame_path = os.path.join(frames_dir, f"frame{count:04d}.png")
            cv2.imwrite(frame_path, image)
            success, image = vidcap.read()
            count += 1
    finally:
        # Always release the capture handle — the original leaked it.
        vidcap.release()
    return count
|
|
|
def frame_number(frame_filename):
    """Return the integer index embedded in a name like ``frame0012.png``.

    Strips the fixed ``frame`` prefix and ``.png`` suffix by length, exactly
    as the slice ``[5:-4]`` would, then parses the remaining digits.
    """
    prefix_len = len("frame")
    suffix_len = len(".png")
    digits = frame_filename[prefix_len:-suffix_len]
    return int(digits)
|
|
|
def count_frame_images(frames_dir):
    """Count extracted frame images (``frame*.png``) in *frames_dir*.

    Returns 0 when the directory does not exist yet (e.g. the very first run,
    before any frames have been extracted) instead of letting ``os.listdir``
    raise ``FileNotFoundError``.

    Args:
        frames_dir: Directory that may contain extracted frames.

    Returns:
        The number of files matching the ``frame*.png`` naming scheme.
    """
    if not os.path.isdir(frames_dir):
        return 0
    return sum(
        1
        for name in os.listdir(frames_dir)
        if name.startswith('frame') and name.endswith('.png')
    )
|
|
|
|
|
# --- Command-line interface -------------------------------------------------
parser = argparse.ArgumentParser(description='Generate images based on video frames.')
parser.add_argument('--prompt', default='a woman', help='the stable diffusion prompt')
parser.add_argument('--video_path', default='./None.mp4', help='Path to the input video file.')
parser.add_argument('--frames_dir', default='./frames', help='Directory to save the extracted video frames.')
parser.add_argument('--output_frames_dir', default='./output_frames', help='Directory to save the generated images.')
parser.add_argument('--init_image_path', default=None, help='Path to the initial conditioning image.')

args = parser.parse_args()

video_path = args.video_path
frames_dir = args.frames_dir
output_frames_dir = args.output_frames_dir
init_image_path = args.init_image_path
prompt = args.prompt

# Extract frames only when none are cached yet, so re-runs reuse prior work.
# Guard against a missing frames_dir: calling count_frame_images on a
# directory that does not exist would otherwise raise FileNotFoundError on
# the very first run.
if not os.path.isdir(frames_dir) or count_frame_images(frames_dir) == 0:
    split_video_into_frames(video_path, frames_dir)

os.makedirs(output_frames_dir, exist_ok=True)

# Seed the generation chain: a user-supplied conditioning image if given,
# otherwise the first extracted video frame (frame0000.png).
if init_image_path:
    print(f"using image {init_image_path}")
    last_generated_image = load_image(init_image_path)
else:
    initial_frame_path = os.path.join(frames_dir, "frame0000.png")
    last_generated_image = load_image(initial_frame_path)
|
|