import tempfile
import cv2
import gradio as gr
import tensorflow as tf
from moviepy.editor import VideoFileClip
from moviepy.video.io.ImageSequenceClip import ImageSequenceClip

from configuration import Config
from model import load_classifier, load_detector
from inference import format_frame, detect_object, classify_action, draw_boxes

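# Load the configuration and both models once at startup so every request reuses them.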
config = Config()
print(f'TensorFlow {tf.__version__}')

print(f'Load classifier from {config.classifier_path}')
classifier = load_classifier(config)
classifier.trainable = False
classifier.summary()

print('Load detector.')
detector = load_detector(config)

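# Generator endpoint: Gradio renders each yielded (preview_image, video) pair as it
# arrives, so users see annotated frames while the full video is still being processed.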
def fn(video: str):  # Gradio passes the uploaded video as a filepath string.
  print('Process video.')
  with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as f:
    output = f.name
    clip = VideoFileClip(video)
    processed_frames = []
    frames = []
    actions = []
    detections = ([], [])
    for i, frame in enumerate(clip.iter_frames()):
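      # Sample every classify_action_frame_step-th frame into the classifier buffer.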
      if i % config.classify_action_frame_step == 0:
        frames.append(format_frame(frame, config))
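      # Refresh object detections every detect_object_frame_step frames.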
      if i % config.detect_object_frame_step == 0:
        print(f'Detect object: Frame {i}')
        detections = detect_object(detector, frame)
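      # Once the buffer is full, classify the action and reset the buffer.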
      if len(frames) == config.classify_action_num_frames:
        print(f'Classify action: Until frame {i}')
        actions = classify_action(classifier, frames, config.id_to_name)
        frames = []
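      # Annotate the current frame with the latest detections and actions.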
      frame = draw_boxes(frame, detections, actions)
      processed_frames.append(frame)
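      # Periodically stream a compressed JPEG preview of the annotated frame.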
      if i % config.yield_frame_steps == 0:
        quality = 9  # Low JPEG quality keeps preview frames small for fast streaming.
        image_array = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        _, image_encoded = cv2.imencode('.jpg', image_array, [int(cv2.IMWRITE_JPEG_QUALITY), quality])
        with tempfile.NamedTemporaryFile(suffix='.jpeg') as preview:
          preview.write(image_encoded)
          preview.flush()  # Flush so the JPEG is fully on disk before Gradio reads it.
          yield preview.name, None
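    # Rebuild the annotated frames into a clip, restoring the original audio track.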
    processed_clip = ImageSequenceClip(processed_frames, fps=clip.fps)
    processed_clip = processed_clip.set_audio(clip.audio)
    processed_clip.write_videofile(output, fps=clip.fps, audio_codec='aac', logger=None)
  yield frame, output  # Final update: the last annotated frame plus the written video file.

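# Gradio interface: an uploaded video in, a live frame preview and the annotated video out.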
inputs = gr.Video(sources=['upload'], label='Input Video')
outputs = [
  gr.Image(interactive=False, label='Last Frame Processed'),
  gr.Video(interactive=False, label='Aeroplane Position and Action Marked')]

examples = [
  ['examples/ZFLFDfovqls_001310_001320.mp4'], # cspell: disable-line
  ['examples/Zv7GyH-fpEY_2023.0_2033.0.mp4']]

iface = gr.Interface(
  title='Aeroplane Position and Action Detection',
  description='Detect aeroplane position and action in a video.',
  theme='soft',
  fn=fn,
  inputs=inputs,
  outputs=outputs,
  examples=examples,
  cache_examples=False)
iface.launch()