# Dependencies, see also requirements.txt ;)
import gradio as gr
import cv2 
import numpy as np

from scenedetect import open_video, SceneManager
from scenedetect.detectors import ContentDetector

from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip

# —————————————————————————————————————————————————

title = "Scene Edit Detection"
description = "Gradio demo of PySceneDetect: automatically finds every shot in a video sequence, then saves each shot as a split mp4 video chunk to download"

# SET INPUTS
video_input = gr.Video(source="upload", format="mp4")

# SET DATA AND COMPONENTS OUTPUTS
# This would be filled like this:
# data_outputs = [ [List from detection], "video_chunk_n0.mp4", "video_chunk_n1.mp4", ... , "video_chunk_n.mp4", [List of video filepath to download], [List of still images from each shot found] ]
data_outputs = []

# This would be filled like this:
# gradio_components_outputs = [ "json", "video", "video", ... , "video", "file", "gallery" ]
gradio_components_outputs = []

# It would be nice if the number of outputs could be set after the Interface launch:
# gradio_components_outputs = [ "json", "video", "video", ... , "video", "file", "gallery" ]
# outputs = gradio_components_outputs
working_outputs = ["json", "file", "gallery"]
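
# For reference, the three fixed outputs above correspond roughly to these explicit
# Gradio components (a sketch only, assuming a Gradio version where the string
# shortcuts and the component classes are interchangeable):
# working_outputs = [gr.JSON(), gr.File(), gr.Gallery()]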

# —————————————————————————————————————————————————

def convert_to_tuple(values):
    # Renamed the argument so it no longer shadows the built-in list type
    return tuple(values)


def find_scenes(video_path, threshold=27.0):
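    # threshold is forwarded to ContentDetector below: lower values make the
    # detector more sensitive (more cuts found), higher values more conservative.
    # Note: data_outputs / gradio_components_outputs are module-level lists,
    # so they keep growing if find_scenes() is called more than once.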
    
    # Open our video, create a scene manager, and add a detector.
    video = open_video(video_path)
    scene_manager = SceneManager()
    scene_manager.add_detector(
        ContentDetector(threshold=threshold))
    
    # Start detection 
    scene_manager.detect_scenes(video, show_progress=True)
    scene_list = scene_manager.get_scene_list()
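    # scene_list is a list of (start, end) FrameTimecode pairs, one per detected shot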
    
    # Push the list of scenes into data_outputs
    data_outputs.append(scene_list)
    gradio_components_outputs.append("json")
    #print(scene_list)
    
    shots = []
    stills = []
    
    # For each shot found, compute entry and exit points in seconds from the frame numbers,
    # then split the video into chunks and store them in the shots list,
    # then extract the first frame of each shot as a thumbnail for the gallery
    for i, shot in enumerate(scene_list):
        
        # STEP 1
        # Get timecode in seconds
        framerate = shot[0].get_framerate()
        shot_in = shot[0].get_frames() / framerate
        shot_out = (shot[1].get_frames() - 1) / framerate
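        # Note: FrameTimecode also exposes get_seconds(), so the conversion above
        # could likely be written more directly (an assumption about the installed
        # PySceneDetect version, left as a comment only):
        # shot_in = shot[0].get_seconds()
        # shot_out = shot[1].get_seconds() - (1 / framerate)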
        
        # Set name template for each shot
        target_name = str(i)+"_cut.mp4"
        
        # Split chunk
        ffmpeg_extract_subclip(video_path, shot_in, shot_out, targetname=target_name)
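        # Note: ffmpeg_extract_subclip copies the existing streams rather than
        # re-encoding (at least in moviepy 1.x), so cut points may snap to the
        # nearest keyframe; re-encode with ffmpeg directly if frame-accurate
        # chunks are needed.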
        
        # Push chunk into shots List
        shots.append(target_name)
        
        # Push each chunk into data_outputs
        data_outputs.append(target_name)
        gradio_components_outputs.append("video")
        
        # —————————————————————————————————————————————————
        
        # STEP 2
        # Extract the first frame of each shot with cv2
        # (use a separate name so the scenedetect video object is not shadowed)
        capture = cv2.VideoCapture(video_path)
        fps = capture.get(cv2.CAP_PROP_FPS)
        print('frames per second =', fps)
        
        frame_id = shot[0].get_frames() # value from scene_list from step 1

        capture.set(cv2.CAP_PROP_POS_FRAMES, frame_id)
        ret, frame = capture.read()
        capture.release()

        # Save the frame as a PNG file, but only if the read succeeded
        if ret:
            img = str(frame_id) + '_screenshot.png'
            cv2.imwrite(img, frame)
            
            # Push image into stills List
            stills.append(img)
    
    # Push the list of video shots into data_outputs for Gradio file component
    data_outputs.append(shots)
    gradio_components_outputs.append("file")
        
    # Push the list of still images into data_outputs
    data_outputs.append(stills)
    gradio_components_outputs.append("gallery")
    
    # This would have been used as the Gradio outputs
    # if the number of outputs could be set after the interface launch.
    # That's not (yet?) possible.
    results = convert_to_tuple(data_outputs)    
    print(results)
 
    # Return the list of shots as JSON, the list of video chunks, and the list of still images.
    # It would be nice to return the full results tuple as outputs,
    # since the number of chunks found is not fixed in advance:
    # return results
    return scene_list, shots, stills

# —————————————————————————————————————————————————

gr.Interface(fn=find_scenes, inputs=video_input, outputs=working_outputs, title=title, description=description).launch()
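
# —————————————————————————————————————————————————

# Quick local test without the Gradio UI (hypothetical example path, replace it
# with a real video file and comment out the launch() call above first):
# scenes, chunks, thumbnails = find_scenes("some_video.mp4", threshold=27.0)
# print(len(scenes), "shots found")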