import os
import cv2
import torch
import argparse
from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
from diffusers.utils import load_image
import numpy as np
from PIL import Image

def split_video_into_frames(video_path, frames_dir):
    # Extract every frame of the video and save it as frameNNNN.png in frames_dir
    if not os.path.exists(frames_dir):
        os.makedirs(frames_dir)
    print(f"Splitting video {video_path} into frames...")
    vidcap = cv2.VideoCapture(video_path)
    success, image = vidcap.read()
    count = 0
    while success:
        frame_path = os.path.join(frames_dir, f"frame{count:04d}.png")
        cv2.imwrite(frame_path, image)
        success, image = vidcap.read()
        count += 1
    vidcap.release()
    print(f"Wrote {count} frames to {frames_dir}")

def frame_number(frame_filename):
    # Extract the frame number from the filename and convert it to an integer
    return int(frame_filename[5:-4])

def count_frame_images(frames_dir):
    # Count the frame images already extracted; returns 0 if the directory does not exist yet
    if not os.path.isdir(frames_dir):
        return 0
    frame_files = [f for f in os.listdir(frames_dir) if f.startswith('frame') and f.endswith('.png')]
    return len(frame_files)

# Argument parser
parser = argparse.ArgumentParser(description='Generate images based on video frames.')
parser.add_argument('--prompt', default='a woman', help='The Stable Diffusion prompt.')
parser.add_argument('--video_path', default='./None.mp4', help='Path to the input video file.')
parser.add_argument('--frames_dir', default='./frames', help='Directory to save the extracted video frames.')
parser.add_argument('--output_frames_dir', default='./output_frames', help='Directory to save the generated images.')
parser.add_argument('--init_image_path', default=None, help='Path to the initial conditioning image.')

args = parser.parse_args()

video_path = args.video_path
frames_dir = args.frames_dir
output_frames_dir = args.output_frames_dir
init_image_path = args.init_image_path
prompt = args.prompt

# If frame images do not already exist, split video into frames
if count_frame_images(frames_dir) == 0:
    split_video_into_frames(video_path, frames_dir)

# Create output frames directory if it doesn't exist
if not os.path.exists(output_frames_dir):
    os.makedirs(output_frames_dir)

# Load the initial conditioning image, if provided
if init_image_path:
    print(f"Using initial conditioning image {init_image_path}")
    last_generated_image = load_image(init_image_path)
else:
    # Fall back to the first extracted video frame as the conditioning image
    initial_frame_path = os.path.join(frames_dir, "frame0000.png")
    last_generated_image = load_image(initial_frame_path)

# ... (rest of the script remains unchanged)
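# For reference only: the commented sketch below shows one common way the
# imported classes (ControlNetModel, StableDiffusionXLControlNetPipeline,
# UniPCMultistepScheduler) are wired together for a Canny-conditioned,
# frame-by-frame pass. It is an illustrative assumption, not the original
# continuation of this script; the model IDs, Canny thresholds, and inference
# settings are placeholders.
#
# controlnet = ControlNetModel.from_pretrained(
#     "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
# )
# pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
#     "stabilityai/stable-diffusion-xl-base-1.0",
#     controlnet=controlnet,
#     torch_dtype=torch.float16,
# )
# pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
# pipe.enable_model_cpu_offload()  # requires the `accelerate` package
#
# frame_files = sorted(
#     (f for f in os.listdir(frames_dir) if f.startswith('frame') and f.endswith('.png')),
#     key=frame_number,
# )
# for frame_file in frame_files:
#     frame = load_image(os.path.join(frames_dir, frame_file))
#     # Build a 3-channel Canny edge map from the frame as the ControlNet input
#     edges = cv2.Canny(np.array(frame), 100, 200)
#     canny_image = Image.fromarray(np.concatenate([edges[:, :, None]] * 3, axis=2))
#     result = pipe(prompt, image=canny_image, num_inference_steps=30).images[0]
#     result.save(os.path.join(output_frames_dir, frame_file))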