import tempfile
import numpy as np
import gradio as gr
import tensorflow as tf
from moviepy.editor import VideoFileClip
from moviepy.video.io.ImageSequenceClip import ImageSequenceClip
from ultralytics import YOLO
from core.model import load_classifier
from core.inference import FrameProcessor
print("Tensorflow version " + tf.__version__)
print('Load classifier.')
classifier_path = 'weights/classifier-8-epoch10.keras'
classifier = load_classifier(classifier_path)
print('Load detector.')
detector_path = 'weights/yolov8n.pt'
detector = YOLO(detector_path)
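# Generator callback: streams the most recently processed frame while the video is handled, then the final annotated clip.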
def fn(video: gr.Video):
    print('Process video.')
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as f:
        output = f.name
    clip = VideoFileClip(video)
    process_frame = FrameProcessor(detector, classifier)
    processed_frames = []
    for frame in clip.iter_frames():
        processed_frames.append(process_frame(frame))
        # Yield the latest annotated frame as a live preview; the video output is not ready yet.
        yield processed_frames[-1], None
    processed_clip = ImageSequenceClip(processed_frames, clip.fps)
    processed_clip.audio = clip.audio
    # Write the annotated clip (not the original) to the temporary output file.
    processed_clip.write_videofile(output, fps=clip.fps, audio_codec='aac', logger=None)
    yield processed_frames[-1], output
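# Gradio components: a video upload as input; a live preview image and the final annotated video as outputs.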
inputs = gr.Video(sources=['upload'], label='Input Video')
outputs = [
    gr.Image(interactive=False, label='Last Frame Processed'),
    gr.Video(interactive=False, label='Aeroplane Position and Action Marked')]
examples = [
    ['examples/ZFLFDfovqls_001310_001320.mp4'],  # cspell: disable-line
    ['examples/Zv7GyH-fpEY_2023.0_2033.0.mp4']]
iface = gr.Interface(
    title='Aeroplane Position and Action Detection',
    description='Detect aeroplane position and action in a video.',
    theme='soft',
    fn=fn,
    inputs=inputs,
    outputs=outputs,
    examples=examples,
    cache_examples=False)
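# Start the Gradio app server (run automatically when the Space starts).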
iface.launch()