# video_text / app.py
# Author: manjuvallayil — "Update app.py" (commit 3ccc973)
import gradio as gr
import subprocess
import os
import whisper
from whisper.utils import write_vtt
# Load the smallest Whisper checkpoint ("tiny") once at module import time so a
# single model instance is shared by every transcription request.
model = whisper.load_model('tiny')
def convert_mp4_mp3(file, output="mp3"):
    """
    Convert an input video file to an audio file (e.g. MP4 -> MP3) using FFmpeg.

    Parameters
    ----------
    file : str
        Path to the input video file.
    output : str
        Target audio extension/format passed to FFmpeg (default ``"mp3"``).

    Returns
    -------
    str
        Path of the converted audio file: same base name as *file* with the
        new extension.
    """
    filename, _ext = os.path.splitext(file)
    # Output path reuses the input's base name with the audio extension.
    out_path = f"{filename}.{output}"
    # -y: overwrite an existing output without prompting; FFmpeg's console
    # output is discarded (stdout -> DEVNULL, stderr merged into stdout).
    subprocess.call(
        ['ffmpeg', '-y', '-i', file, out_path],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.STDOUT,
    )
    return out_path
def transcribe(video):
    """
    Transcribe the speech in a video file with the module-level Whisper model
    (task='translate', i.e. output is translated to English) and write the
    captions to a WebVTT subtitle file.

    Parameters
    ----------
    video : str
        Path to the uploaded video file.

    Returns
    -------
    str
        Path to the generated ``.vtt`` subtitle file.
    """
    # Whisper consumes audio, so extract an MP3 track from the video first.
    audio_file = convert_mp4_mp3(video)

    # Decoding configuration: beam search with 5 beams / 5 candidates;
    # fp16=False so inference also works on CPU-only hosts.
    options = dict(beam_size=5, best_of=5, fp16=False)
    translate_options = dict(task='translate', **options)
    result = model.transcribe(audio_file, **translate_options)

    output_dir = ''  # write beside the current working directory
    audio_path = os.path.splitext(os.path.basename(audio_file))[0]
    vtt_path = os.path.join(output_dir, audio_path + '.vtt')

    # Explicit UTF-8 so non-ASCII captions survive on platforms whose default
    # text encoding is not UTF-8 (e.g. Windows).
    with open(vtt_path, 'w', encoding='utf-8') as f:
        write_vtt(result['segments'], file=f)
    return vtt_path
# --- Gradio UI -------------------------------------------------------------
# NOTE(review): indentation of this section was lost in the paste; the nesting
# below (Row inside Box inside Group) is the conventional layout for this
# widget tree — confirm against the original file.
block = gr.Blocks()
with block:
    with gr.Group():
        with gr.Box():
            with gr.Row().style():
                # Video upload widget; mirror_webcam=False keeps webcam
                # captures un-flipped. type="filepath" hands transcribe() a
                # path on disk rather than raw data.
                input_video = gr.Video(
                    label="Upload the video and click the button below",
                    type="filepath",
                    mirror_webcam=False
                )
                # Download widget for the generated .vtt subtitle file.
                output_video = gr.File()
        btn = gr.Button('Transcribe Video', type='primary')
        # Wire the button to transcribe(): video path in, .vtt path out.
        btn.click(transcribe, inputs=[input_video], outputs=[output_video])
# enable_queue serializes requests (the shared model is not re-entrant);
# debug=True surfaces tracebacks in the console.
block.launch(enable_queue=True, debug=True)