# sign_translate / app.py
# Author: oronird
# Commit: "Create app.py" (6e0339f)
import gradio as gr
import numpy as np
import cv2
import pickle
#from tensorflow import keras
from stickman import image_pos_hands
from video_processing import frames_for_video, frames_to_video
def AWSL(path_video, recognition_type):
    """Render an uploaded sign-language video as a stick-figure video.

    Parameters
    ----------
    path_video : str
        Filesystem path of the uploaded video (provided by gr.Video).
    recognition_type : str
        'Sign' or 'Sentence' from the Radio input. Currently unused:
        recognition itself is not implemented yet — the model loading
        below is commented out.

    Returns
    -------
    tuple[str, str]
        (path of the generated video, text for the output textbox).
        Until the recognition model is wired in, both elements are the
        generated video's path.
    """
    frames = frames_for_video(path_video)
    # Convert each frame to a 448x448 stick-figure pose rendering.
    stick_frames = [image_pos_hands(img, output_image_res=(448, 448))
                    for img in frames]
    vid_new_path = "test.mp4"
    frames_to_video(stick_frames, vid_new_path)
    # Recognition model — not yet enabled.
    # model_path = "./inception_stickman_50.pkl"
    # model = pickle.load(open(model_path, "rb"))
    return (vid_new_path, vid_new_path)
# Gradio UI: upload a video, choose the translation granularity, and get
# back the stick-figure video plus (eventually) the recognized text.
# Uses the modern top-level component API (gr.Video / gr.Radio); the old
# gr.inputs.* namespace was removed in Gradio 3.x.
iface = gr.Interface(
    fn=AWSL,
    title="Video to Recognition",
    description="Recognize American Sign Language and translate it into English",
    inputs=[
        gr.Video(label="Upload Video File"),
        gr.Radio(label="Choose translation", choices=['Sign', 'Sentence']),
    ],
    # examples=[[test_video_path]],
    outputs=[gr.Video(label="Generated Video"), "text"],
)
iface.launch()