chiyoi committed
Commit b44c3e2
1 Parent(s): 4f30d97
Files changed (1)
  1. app.py +16 -15
app.py CHANGED
@@ -1,14 +1,22 @@
+import tempfile
 import gradio as gr
 import tensorflow as tf
 from moviepy.editor import VideoFileClip
 from ultralytics import YOLO
 
-from core.data import ClassMapping
 from core.model import load_classifier
 from core.inference import FrameProcessor
-
 print("Tensorflow version " + tf.__version__)
 
+id_to_name = {
+    0: 'Flying',
+    1: 'Landing',
+    2: 'Other',
+    3: 'Straight Taxiing',
+    4: 'Takeoff',
+    5: 'Turning Maneuver',
+}
+
 print('Load classifier.')
 classifier_path = 'weights/classifier-7.keras'
 classifier = load_classifier(classifier_path)
@@ -19,19 +27,12 @@ detector = YOLO(detector_path)
 
 def fn(video: gr.Video):
     print('Process video.')
-    output = f'Marked-{str(video)}'
-    clip = VideoFileClip(video)
-    id_to_name = {
-        0: 'Flying',
-        1: 'Landing',
-        2: 'Other',
-        3: 'Straight Taxiing',
-        4: 'Takeoff',
-        5: 'Turning Maneuver',
-    }
-    process_frame = FrameProcessor(detector, classifier, id_to_name)
-    clip = clip.fl_image(process_frame)
-    clip.write_videofile(output, fps=clip.fps, audio_codec='aac', logger=None)
+    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as f:
+        output = f.name
+    clip = VideoFileClip(video)
+    process_frame = FrameProcessor(detector, classifier, id_to_name)
+    clip = clip.fl_image(process_frame)
+    clip.write_videofile(output, fps=clip.fps, audio_codec='aac', logger=None)
     return output
 
 inputs = gr.Video(sources=['upload'], label='Input Video')
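
For context beyond these hunks, a minimal sketch of how the revised fn would typically be wired into a Gradio interface; the output component and Interface construction are not part of this diff, so the names below (outputs, demo) are assumptions, not the author's code.

    # Hypothetical wiring, not shown in this commit's hunks.
    outputs = gr.Video(label='Output Video')
    demo = gr.Interface(fn=fn, inputs=inputs, outputs=outputs)
    demo.launch()

Note that delete=False in tempfile.NamedTemporaryFile keeps the temporary .mp4 on disk after the with block closes the handle, so clip.write_videofile(output, ...) can write to that path and Gradio can serve the file returned by fn.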