fffiloni committed
Commit 7064043
1 Parent(s): a953f25

Update app.py

Files changed (1)
  1. app.py +78 -25
app.py CHANGED
@@ -1,73 +1,126 @@
 
  import gradio as gr
  import cv2
  import numpy as np

  from scenedetect import open_video, SceneManager
  from scenedetect.detectors import ContentDetector
- #from scenedetect.video_splitter import split_video_ffmpeg

  from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip

- outputs = ["json", "file", "gallery"]
  data_outputs = []


- def fn(list):
-     return tuple(list);



  def find_scenes(video_path, threshold=27.0):
      # Open our video, create a scene manager, and add a detector.
      video = open_video(video_path)
      scene_manager = SceneManager()
      scene_manager.add_detector(
          ContentDetector(threshold=threshold))
      scene_manager.detect_scenes(video, show_progress=True)
      scene_list = scene_manager.get_scene_list()

-     #outputs.append("json")
      data_outputs.append(scene_list)
      #print(scene_list)

      shots = []
      stills = []

-     for i, scene in enumerate(scene_list):
-         shot_in = scene[0].get_frames() / scene[0].get_framerate()
-         shot_out = (scene[1].get_frames() - 1) / scene[0].get_framerate()
          target_name = str(i)+"_cut.mp4"
          ffmpeg_extract_subclip(video_path, shot_in, shot_out, targetname=target_name)
-         data_outputs.append(target_name)
          shots.append(target_name)

          video = cv2.VideoCapture(video_path)
-
          fps = video.get(cv2.CAP_PROP_FPS)
          print('frames per second =',fps)
-
-         frame_id = scene[0].get_frames()

          video.set(cv2.CAP_PROP_POS_FRAMES, frame_id)
          ret, frame = video.read()

-         # Display and save frame
-         #cv2.imshow('frame', frame); cv2.waitKey(0)
          img = str(frame_id) + '_screenshot.png'
          cv2.imwrite(img,frame)
          stills.append(img)
-     #outputs.append("video")
-     #shot_in = scene_list[1][0].get_frames() / scene_list[1][0].get_framerate()
-     #shot_out = (scene_list[1][1].get_frames() - 1) / scene_list[1][0].get_framerate()
-     #print(shot_in, shot_out)

-     results = fn(data_outputs)
-     print(results)
-     print(stills)
-     #ffmpeg_extract_subclip(video_path, shot_in, shot_out, targetname="cut.mp4")

      return scene_list, shots, stills
-
- video_input=gr.Video(source="upload", format="mp4");

- gr.Interface(fn=find_scenes, inputs=video_input, outputs=outputs).launch()
 
 
+ # Dependencies, see also requirement.txt ;)
  import gradio as gr
  import cv2
  import numpy as np

  from scenedetect import open_video, SceneManager
  from scenedetect.detectors import ContentDetector

  from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip

+ # —————————————————————————————————————————————————
+
+ title = "Scene Edit Detection"
+ description = "Gradio demo of PySceneDetect: automatically find every shot in a video sequence, then save each shot as a separate mp4 video chunk to download"
+
+ # SET INPUTS
+ video_input = gr.Video(source="upload", format="mp4")
+
+ # SET DATA AND COMPONENT OUTPUTS
+ # This would be filled like this:
+ # data_outputs = [ [List from detection], "video_chunk_n0.mp4", "video_chunk_n1.mp4", ... , "video_chunk_n.mp4", [List of video filepaths to download], [List of still images from each shot found] ]
  data_outputs = []

+ # This would be filled like this:
+ # gradio_components_outputs = [ "json", "video", "video", ... , "video", "file", "gallery" ]
+ gradio_components_outputs = []

+ # This would be nice if the number of outputs could be set after Interface launch:
+ # gradio_components_outputs = [ "json", "video", "video", ... , "video", "file", "gallery" ]
+ # outputs = gradio_components_outputs
+ working_outputs = ["json", "file", "gallery"]

+ # —————————————————————————————————————————————————
+
+ def convert_to_tuple(list):
+     return tuple(list)


  def find_scenes(video_path, threshold=27.0):
+
      # Open our video, create a scene manager, and add a detector.
      video = open_video(video_path)
      scene_manager = SceneManager()
      scene_manager.add_detector(
          ContentDetector(threshold=threshold))
+
+     # Start detection
      scene_manager.detect_scenes(video, show_progress=True)
      scene_list = scene_manager.get_scene_list()

+     # Push the list of scenes into data_outputs
      data_outputs.append(scene_list)
+     gradio_components_outputs.append("json")
      #print(scene_list)

      shots = []
      stills = []

+     # For each shot found, set entry and exit points in seconds from the frame numbers,
+     # then split the video into chunks and store them in the shots list,
+     # then extract the first frame of each shot as a thumbnail for the gallery
+     for i, shot in enumerate(scene_list):
+
+         # STEP 1
+         # Get timecodes in seconds
+         framerate = shot[0].get_framerate()
+         shot_in = shot[0].get_frames() / framerate
+         shot_out = (shot[1].get_frames() - 1) / framerate
+
+         # Set the name template for each shot
          target_name = str(i)+"_cut.mp4"
+
+         # Split chunk
          ffmpeg_extract_subclip(video_path, shot_in, shot_out, targetname=target_name)
+
+         # Push chunk into the shots list
          shots.append(target_name)

+         # Push each chunk into data_outputs
+         data_outputs.append(target_name)
+         gradio_components_outputs.append("video")
+
+         # —————————————————————————————————————————————————
+
+         # STEP 2
+         # Extract the first frame of each shot with cv2
          video = cv2.VideoCapture(video_path)
          fps = video.get(cv2.CAP_PROP_FPS)
          print('frames per second =',fps)
+
+         frame_id = shot[0].get_frames()  # value from scene_list from step 1

          video.set(cv2.CAP_PROP_POS_FRAMES, frame_id)
          ret, frame = video.read()

+         # Save frame as a PNG file
          img = str(frame_id) + '_screenshot.png'
          cv2.imwrite(img,frame)
+
+         # Push image into the stills list
          stills.append(img)

+     # Push the list of video shots into data_outputs for the Gradio file component
+     data_outputs.append(shots)
+     gradio_components_outputs.append("file")
+
+     # Push the list of still images into data_outputs
+     data_outputs.append(stills)
+     gradio_components_outputs.append("gallery")

+     # This would have been used as the gradio outputs,
+     # if we could set the number of outputs after the interface launch.
+     # That's not (yet?) possible.
+     results = convert_to_tuple(data_outputs)
+     print(results)
+
+     # Return the list of shots as JSON, the list of video chunks, and the list of still images.
+     # It would be nice to be able to return the results tuple as outputs,
+     # since the number of chunks found is not fixed:
+     # return results
      return scene_list, shots, stills

+ # —————————————————————————————————————————————————
+
+ gr.Interface(fn=find_scenes, inputs=video_input, outputs=working_outputs, title=title, description=description).launch()
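
For reference, a minimal standalone sketch (not part of the commit) of the shot detection and the frame-to-seconds conversion performed in STEP 1 of find_scenes(); the input filename "sample.mp4" is a placeholder, not a file from this repo.

# Sketch only: detect shots in a local file and print their boundaries in
# seconds, mirroring STEP 1 above. "sample.mp4" is a hypothetical filename.
from scenedetect import open_video, SceneManager
from scenedetect.detectors import ContentDetector

video = open_video("sample.mp4")
scene_manager = SceneManager()
scene_manager.add_detector(ContentDetector(threshold=27.0))
scene_manager.detect_scenes(video)

for i, (start, end) in enumerate(scene_manager.get_scene_list()):
    # FrameTimecode.get_seconds() equals get_frames() / get_framerate(),
    # the values the app passes to ffmpeg_extract_subclip as cut points.
    shot_in = start.get_seconds()
    shot_out = (end.get_frames() - 1) / end.get_framerate()
    print(f"shot {i}: {shot_in:.3f}s -> {shot_out:.3f}s")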