# Dependencies, see also requirements.txt ;)
import gradio as gr
import cv2
import numpy as np
import os
from scenedetect import open_video, SceneManager
from scenedetect.detectors import ContentDetector
from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
# —————————————————————————————————————————————————
title = "Scene Edit Detection"
description = "Gradio demo of PySceneDetect: automatically find every shot in a video sequence, then save each shot as a split mp4 video chunk to download"
# —————————————————————————————————————————————————
# SET INPUTS
video_input = gr.Video(source="upload", format="mp4", label="Video Sequence")
# —————————————————————————————————————————————————
def convert_to_tuple(data_list):
    return tuple(data_list)
def find_scenes(video_path, threshold=27.0):
    # file name without extension
    filename = os.path.splitext(os.path.basename(video_path))[0]
    # Open our video, create a scene manager, and add a detector.
    video = open_video(video_path)
    scene_manager = SceneManager()
    scene_manager.add_detector(
        ContentDetector(threshold=threshold))
    # Start detection
    scene_manager.detect_scenes(video, show_progress=True)
    scene_list = scene_manager.get_scene_list()
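    # Each entry of scene_list is a (start, end) pair of FrameTimecode objects, one per detected shot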
    # Push the list of scenes into data_outputs
    data_outputs.append(scene_list)
    gradio_components_outputs.append("json")
    #print(scene_list)
    timecodes = []
    shots = []
    stills = []
    # For each shot found, set entry and exit points as seconds from the frame number
    # Then split the video into chunks and store them into the shots List
    # Then extract the first frame of each shot as a thumbnail for the gallery
    for i, shot in enumerate(scene_list):
        # STEP 1
        # Get timecode in seconds
        framerate = shot[0].get_framerate()
        shot_in = shot[0].get_frames() / framerate
        shot_out = shot[1].get_frames() / framerate
        tc_in = shot[0].get_timecode()
        tc_out = shot[1].get_timecode()
        frame_in = shot[0].get_frames()
        frame_out = shot[1].get_frames()
        timecodes.append({"title": filename, "fps": framerate})
        timecode = {"tc_in": tc_in, "tc_out": tc_out, "frame_in": frame_in, "frame_out": frame_out}
        timecodes.append(timecode)
        # Set name template for each shot
        target_name = "shot_" + str(i+1) + "_" + filename + ".mp4"
        # Split chunk
        ffmpeg_extract_subclip(video_path, shot_in, shot_out, targetname=target_name)
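        # Note: ffmpeg_extract_subclip copies the existing streams without re-encoding,
        # so the actual cut points may snap to nearby keyframes rather than the exact frames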
        # Push chunk into shots List
        shots.append(target_name)
        # Push each chunk into data_outputs
        data_outputs.append(target_name)
        gradio_components_outputs.append("video")
        # —————————————————————————————————————————————————
        # STEP 2
        # extract first frame of each shot with cv2
        video = cv2.VideoCapture(video_path)
        fps = video.get(cv2.CAP_PROP_FPS)
        print('frames per second =', fps)
        frame_id = shot[0].get_frames() # value from scene_list from step 1
        video.set(cv2.CAP_PROP_POS_FRAMES, frame_id)
        ret, frame = video.read()
        # Save frame as PNG file
        img = str(frame_id) + '_screenshot.png'
        cv2.imwrite(img, frame)
        # Release the capture before the next iteration
        video.release()
        # Push image into stills List
        stills.append(img)
    # Push the list of video shots into data_outputs for Gradio file component
    data_outputs.append(shots)
    gradio_components_outputs.append("file")
    # Push the list of still images into data_outputs
    data_outputs.append(stills)
    gradio_components_outputs.append("gallery")
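    # Note: data_outputs and gradio_components_outputs are module-level lists,
    # so they keep growing across successive calls to find_scenes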
    # This would have been used as the gradio outputs,
    # if we could set the number of outputs after the interface launches
    # That's not (yet?) possible
    results = convert_to_tuple(data_outputs)
    print(results)
    # Return the List of shots as JSON, the List of video chunks, and the List of still images
    # It would be nice to return the full results tuple as outputs,
    # since the number of chunks found is not fixed:
    # return results
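    # Illustrative shape of the returned values (example values, not real output):
    #   timecodes -> [{"title": "myclip", "fps": 25.0}, {"tc_in": "00:00:00.000", "tc_out": "00:00:04.000", "frame_in": 0, "frame_out": 100}, ...]
    #   shots     -> ["shot_1_myclip.mp4", "shot_2_myclip.mp4", ...]
    #   stills    -> ["0_screenshot.png", "100_screenshot.png", ...]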
    return timecodes, shots, stills
# —————————————————————————————————————————————————
# SET DATA AND COMPONENTS OUTPUTS
# This would be filled like this:
# data_outputs = [ [List from detection], "video_chunk_n0.mp4", "video_chunk_n1.mp4", ... , "video_chunk_n.mp4", [List of video filepath to download], [List of still images from each shot found] ]
data_outputs = []
# This would be filled like this:
# gradio_components_outputs = [ "json", "video", "video", ... , "video", "file", "gallery" ]
gradio_components_outputs = []
# SET OUTPUTS
# This would be nicer if the number of outputs could be set after the Interface launch,
# because we do not know in advance how many shots will be detected:
# gradio_components_outputs = [ "json", "video", "video", ... , "video", "file", "gallery" ]
# outputs = gradio_components_outputs
# ANOTHER SOLUTION WOULD BE USING A (FUTURE ?) "VIDEO GALLERY" GRADIO COMPONENT FROM LIST :)
outputs = [gr.JSON(label="Shots detected"), gr.File(label="Downloadable Shots"), gr.Gallery(label="Still Images from each shot").style(grid=3)]
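# Note: .style(grid=3) requests a 3-column grid layout for the Gallery (Gradio 3.x styling API)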
# —————————————————————————————————————————————————
print('Hello Sylvain')
gr.Interface(fn=find_scenes, inputs=video_input, outputs=outputs, title=title, description=description).launch()