chiyoi committed
Commit: b0bdee4
Parent: d78535a
Files changed (1)
  1. app.py +21 -7
app.py CHANGED
```diff
@@ -7,8 +7,10 @@ from moviepy.editor import VideoFileClip
 from moviepy.video.io.ImageSequenceClip import ImageSequenceClip
 from ultralytics import YOLO
 
+from configurations import *
+from core.data import format_frame
 from core.model import load_classifier
-from core.inference import FrameProcessor
+from core.inference import detect_object, classify_action, draw_boxes
 print("Tensorflow version " + tf.__version__)
 
 print('Load classifier.')
```
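The star import pulls pacing constants from `configurations`. Only the three names used in the loop below are visible in this diff; a minimal sketch of what `configurations.py` might contain, with placeholder values that are illustrative guesses rather than the repo's actual settings:

```python
# Hypothetical configurations.py: the three names come from this diff,
# but the values are illustrative guesses, not the repo's real settings.
detect_object_frame_steps = 5      # run YOLO detection on every 5th frame
classify_action_frame_steps = 3    # sample every 3rd frame into the action window
classify_action_num_frames = 8     # classify once this many sampled frames are buffered
```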
```diff
@@ -24,15 +26,27 @@ def fn(video: gr.Video):
     with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as f:
         output = f.name
     clip = VideoFileClip(video)
-    process_frame = FrameProcessor(detector, classifier)
     processed_frames = []
-    for frame in clip.iter_frames():
-        processed_frames.append(process_frame(frame))
-        yield processed_frames[-1], None
+    frames = []
+    actions = []
+    detections = ([], [])
+    for i, frame in enumerate(clip.iter_frames()):
+        if i % classify_action_frame_steps == 0:
+            frames.append(format_frame(frame))
+        if i % detect_object_frame_steps == 0:
+            print(f'Detect object: Frame {i}')
+            detections = detect_object(detector, frame)
+        if len(frames) == classify_action_num_frames:
+            print(f'Classify action: Until frame {i}')
+            actions = classify_action(classifier, frames)
+            frames = []
+        frame = draw_boxes(frame, detections, actions)
+        processed_frames.append(frame)
+        yield frame, None
     processed_clip = ImageSequenceClip(processed_frames, clip.fps)
     processed_clip.audio = clip.audio
-    clip.write_videofile(output, fps=clip.fps, audio_codec='aac', logger=None)
-    yield processed_frames[-1], output
+    processed_clip.write_videofile(output, fps=clip.fps, audio_codec='aac', logger=None)
+    yield frame, output
 
 inputs = gr.Video(sources=['upload'], label='Input Video')
 outputs = [
```
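The rewritten loop replaces the removed `FrameProcessor` wrapper with three explicit steps: detection runs every `detect_object_frame_steps` frames, sampled frames accumulate until `classify_action_num_frames` are buffered and then go through the classifier in one batch, and every frame is annotated and yielded so Gradio can stream a live preview. The commit also fixes the final write to render `processed_clip` instead of the unmodified source `clip`. Below is a minimal sketch of what `core/inference.py` plausibly provides, inferred only from these call sites; the signatures, the `(boxes, labels)` shape of `detections`, and the drawing details are assumptions, not the repo's code:

```python
# Sketch of core/inference.py inferred from the call sites above.
# Everything here is an assumption except the three function names.
import cv2
import numpy as np


def detect_object(detector, frame):
    """Run a YOLO detector on one frame; return (boxes, labels) as plain lists."""
    result = detector(frame, verbose=False)[0]
    boxes = result.boxes.xyxy.tolist()                         # [[x1, y1, x2, y2], ...]
    labels = [result.names[int(c)] for c in result.boxes.cls]  # class names
    return boxes, labels


def classify_action(classifier, frames):
    """Classify a window of preprocessed frames; return action labels."""
    batch = np.expand_dims(np.stack(frames), axis=0)  # (1, T, H, W, C)
    probs = classifier.predict(batch, verbose=0)[0]
    return [str(np.argmax(probs))]  # assumed: top-1 class index as the label


def draw_boxes(frame, detections, actions):
    """Overlay detection boxes and current action labels on a frame copy."""
    frame = frame.copy()
    boxes, labels = detections
    for (x1, y1, x2, y2), label in zip(boxes, labels):
        cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
        cv2.putText(frame, label, (int(x1), int(y1) - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
    for j, action in enumerate(actions):
        cv2.putText(frame, action, (10, 30 + 25 * j),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0, 0), 2)
    return frame
```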
 
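Because `fn` is a generator, each `yield frame, None` streams an intermediate frame to the first output while the second stays empty, and the final `yield frame, output` delivers the finished file. A hypothetical wiring, assuming the truncated `outputs` list pairs a frame preview with the rendered video; the repo's actual components may differ:

```python
# Hypothetical app wiring; the gr.Image/gr.Video components and the Interface
# call are assumptions based on the truncated `outputs = [` above.
outputs = [
    gr.Image(label='Current Frame'),
    gr.Video(label='Output Video'),
]
demo = gr.Interface(fn=fn, inputs=inputs, outputs=outputs)
demo.launch()
```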