chiyoi commited on
Commit
92ddd3a
1 Parent(s): 887ff54

update: Add examples.

Browse files
.gitattributes CHANGED
@@ -11,6 +11,7 @@
11
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
12
  *.mlmodel filter=lfs diff=lfs merge=lfs -text
13
  *.model filter=lfs diff=lfs merge=lfs -text
 
14
  *.msgpack filter=lfs diff=lfs merge=lfs -text
15
  *.npy filter=lfs diff=lfs merge=lfs -text
16
  *.npz filter=lfs diff=lfs merge=lfs -text
 
11
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
12
  *.mlmodel filter=lfs diff=lfs merge=lfs -text
13
  *.model filter=lfs diff=lfs merge=lfs -text
14
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
15
  *.msgpack filter=lfs diff=lfs merge=lfs -text
16
  *.npy filter=lfs diff=lfs merge=lfs -text
17
  *.npz filter=lfs diff=lfs merge=lfs -text
app.py CHANGED
@@ -1,24 +1,14 @@
1
- import os
2
- import tempfile
3
  import gradio as gr
4
  import tensorflow as tf
5
  from moviepy.editor import VideoFileClip
6
  from ultralytics import YOLO
7
 
 
8
  from core.model import load_classifier
9
  from core.inference import FrameProcessor
10
 
11
  print("Tensorflow version " + tf.__version__)
12
 
13
- id_to_name = {
14
- 0: 'Flying',
15
- 1: 'Landing',
16
- 2: 'Other',
17
- 3: 'Straight Taxiing',
18
- 4: 'Takeoff',
19
- 5: 'Turning Maneuver',
20
- }
21
-
22
  print('Load classifier.')
23
  classifier_path = 'weights/classifier-7.keras'
24
  classifier = load_classifier(classifier_path)
@@ -29,20 +19,33 @@ detector = YOLO(detector_path)
29
 
30
  def fn(video: gr.Video):
31
  print('Process video.')
32
- with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as f:
33
- output = f.name
34
- clip = VideoFileClip(video)
35
- process_frame = FrameProcessor(detector, classifier, id_to_name)
36
- clip = clip.fl_image(process_frame)
37
- clip.write_videofile(output, fps=clip.fps, audio_codec='aac', logger=None)
 
 
 
 
 
 
 
38
  return output
39
 
40
  inputs = gr.Video(sources=['upload'], label='Input Video')
41
  outputs = gr.Video(interactive=False, label='Aeroplane Position and Action Marked')
42
 
 
 
 
 
 
43
  iface = gr.Interface(
44
  fn=fn,
45
  inputs=inputs,
46
  outputs=outputs,
 
47
  )
48
  iface.launch()
 
 
 
1
  import gradio as gr
2
  import tensorflow as tf
3
  from moviepy.editor import VideoFileClip
4
  from ultralytics import YOLO
5
 
6
+ from core.data import ClassMapping
7
  from core.model import load_classifier
8
  from core.inference import FrameProcessor
9
 
10
  print("Tensorflow version " + tf.__version__)
11
 
 
 
 
 
 
 
 
 
 
12
  print('Load classifier.')
13
  classifier_path = 'weights/classifier-7.keras'
14
  classifier = load_classifier(classifier_path)
 
19
 
20
  def fn(video: gr.Video):
21
  print('Process video.')
22
+ output = f'Marked-{str(video)}'
23
+ clip = VideoFileClip(video)
24
+ id_to_name = {
25
+ 0: 'Flying',
26
+ 1: 'Landing',
27
+ 2: 'Other',
28
+ 3: 'Straight Taxiing',
29
+ 4: 'Takeoff',
30
+ 5: 'Turning Maneuver',
31
+ }
32
+ process_frame = FrameProcessor(detector, classifier, id_to_name)
33
+ clip = clip.fl_image(process_frame)
34
+ clip.write_videofile(output, fps=clip.fps, audio_codec='aac', logger=None)
35
  return output
36
 
37
  inputs = gr.Video(sources=['upload'], label='Input Video')
38
  outputs = gr.Video(interactive=False, label='Aeroplane Position and Action Marked')
39
 
40
+ examples = [
41
+ ['examples/ZFLFDfovqls_001310_001320.mp4'], # cspell: disable-line
42
+ ['examples/Zv7GyH-fpEY_2023.0_2033.0.mp4'],
43
+ ]
44
+
45
  iface = gr.Interface(
46
  fn=fn,
47
  inputs=inputs,
48
  outputs=outputs,
49
+ examples=examples,
50
  )
51
  iface.launch()
examples/ZFLFDfovqls_001310_001320.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1cf18a000dac495924649008dfe29a413657e2cbd9c39400f4980867de39dba4
3
+ size 669598
examples/Zv7GyH-fpEY_2023.0_2033.0.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dec949ee31a5e3b95f0b12a67c2abdcaf8d2e4fdab6dcae81d9cf563f77851a1
3
+ size 1055493