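"""Gradio app that downloads a video's audio track with yt-dlp and transcribes it with faster-whisper."""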

import os
import uuid

import gradio as gr
import yt_dlp
from faster_whisper import WhisperModel


# List of all supported video sites: https://github.com/yt-dlp/yt-dlp/blob/master/supportedsites.md
def download_convert_video_to_audio(
    yt_dlp,
    video_url: str,
    destination_path: str,
) -> None:
    """Download the video at `video_url` and extract its audio to `<destination_path>.mp3`."""
    ydl_opts = {
        "format": "bestaudio/best",
        "postprocessors": [
            {  # Extract audio using ffmpeg
                "key": "FFmpegExtractAudio",
                "preferredcodec": "mp3",
            }
        ],
        "outtmpl": f"{destination_path}.%(ext)s",
    }
    try:
        print(f"Downloading video from {video_url}")
        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            ydl.download([video_url])  # download() expects a list of URLs
        print(f"Downloaded video from {video_url} to {destination_path}")
    except Exception:
        # Re-raise so the caller (and the Gradio UI) sees the original error.
        raise


def segment_to_dict(segment):
    """Convert a faster-whisper Segment (a NamedTuple) to a plain dict, including any word-level entries."""
    segment = segment._asdict()
    if segment["words"] is not None:
        segment["words"] = [word._asdict() for word in segment["words"]]
    return segment


def download_video(video_url: str):
    """Download a video's audio to an mp3 with a random file name."""
    download_convert_video_to_audio(yt_dlp, video_url, f"{uuid.uuid4().hex}")


def transcribe_video(video_url: str, word_timestamps: bool = True, model_size: str = "tiny"):
    """Download the audio for `video_url`, transcribe it, and return the segments as dicts."""
    print(word_timestamps)
    print("loading model")
    model = WhisperModel(model_size, device="cpu", compute_type="int8")
    # model = WhisperModel(model_size, device="cuda", compute_type="float16")
    print("getting hex")
    rand_id = uuid.uuid4().hex
    print("doing download")
    download_convert_video_to_audio(yt_dlp, video_url, f"{rand_id}")
    segments, info = model.transcribe(f"{rand_id}.mp3", beam_size=5, word_timestamps=word_timestamps)
    segments = [segment_to_dict(segment) for segment in segments]
    total_duration = round(info.duration, 2)  # Same precision as the Whisper timestamps.
    print(info)
    os.remove(f"{rand_id}.mp3")
    print("Detected language '%s' with probability %f" % (info.language, info.language_probability))
    print(segments)
    return segments
# print("Detected language '%s' with probability %f" % (info.language, info.language_probability))
# for segment in segments:
# print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))
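

# Gradio UI: a video URL textbox, a word-timestamps checkbox, and a model-size dropdown;
# the transcription segments are returned as text.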
demo = gr.Interface(
    fn=transcribe_video,
    inputs=[
        gr.Textbox(label="Video URL"),
        gr.Checkbox(label="Word Timestamps", info="Do you want word timestamps in the response?"),
        gr.Dropdown(label="Model", value="tiny", choices=["tiny", "base", "small"]),
    ],
    outputs="text",
)

demo.launch()