import os
import tempfile
import time

import gradio as gr
import numpy as np
import torch
import yt_dlp as youtube_dl
from gradio_client import Client
from pyannote.audio import Pipeline
from transformers.pipelines.audio_utils import ffmpeg_read

YT_LENGTH_LIMIT_S = 36000  # cap YouTube videos at 10 hours to encourage fair usage
SAMPLING_RATE = 16000  # audio is decoded to 16 kHz before diarization

API_URL = "https://sanchit-gandhi-whisper-jax.hf.space/"
HF_TOKEN = os.environ.get("HF_TOKEN")


# connect to the Whisper JAX endpoint: transcription jobs are submitted here and
# run remotely, while diarization runs locally
client = Client(API_URL)


# the pyannote speaker-diarization model is gated: HF_TOKEN must belong to an
# account that has accepted the model's terms of use
diarization_pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization", use_auth_token=HF_TOKEN)


def format_string(timestamp):
    """
    Convert a timestamp string in (HH:)MM:SS format to float seconds. Note that the hour
    field is optional, and is prepended within the function if not input.

    Args:
        timestamp (str):
            Timestamp in string format, either MM:SS or HH:MM:SS.
    Returns:
        seconds (float):
            Total seconds corresponding to the input timestamp.
    """
    split_time = timestamp.split(":")
    split_time = [float(sub_time) for sub_time in split_time]

    if len(split_time) == 2:
        split_time.insert(0, 0)

    seconds = split_time[0] * 3600 + split_time[1] * 60 + split_time[2]
    return seconds
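
# e.g. format_string("01:10") -> 70.0 and format_string("1:01:10") -> 3670.0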


def format_timestamp(seconds: float, always_include_hours: bool = False, decimal_marker: str = "."):
    """
    Reformat a timestamp from a float of seconds to a string in format (HH:)MM:SS.fff. Note that
    the hour field is optional, and is prepended in the function if the number of hours > 0.

    Args:
        seconds (float):
            Total seconds corresponding to the input timestamp.
    Returns:
        timestamp (str):
            Timestamp in string format, either MM:SS.fff or HH:MM:SS.fff.
    """
    if seconds is not None:
        milliseconds = round(seconds * 1000.0)

        hours = milliseconds // 3_600_000
        milliseconds -= hours * 3_600_000

        minutes = milliseconds // 60_000
        milliseconds -= minutes * 60_000

        seconds = milliseconds // 1_000
        milliseconds -= seconds * 1_000

        hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
        return f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}"
    else:
        # we have a malformed timestamp, so just return it as is
        return seconds
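
# e.g. format_timestamp(70.0) -> "01:10.000" and format_timestamp(3670.5) -> "01:01:10.500"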


def format_as_transcription(raw_segments):
    return "\n\n".join(
        [
            f"{chunk['speaker']} [{format_timestamp(chunk['timestamp'][0])} -> {format_timestamp(chunk['timestamp'][1])}] {chunk['text']}"
            for chunk in raw_segments
        ]
    )
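
# each chunk renders as e.g. "SPEAKER_00 [00:00.000 -> 00:05.420] Hello there."
# (hypothetical values, shown to illustrate the output format)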


def _return_yt_html_embed(yt_url):
    # naive parse: assumes a standard watch URL of the form ...watch?v=<video_id>
    video_id = yt_url.split("?v=")[-1]
    HTML_str = (
        f'<center> <iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"> </iframe>'
        " </center>"
    )
    return HTML_str


def download_yt_audio(yt_url, filename):
    info_loader = youtube_dl.YoutubeDL()
    try:
        info = info_loader.extract_info(yt_url, download=False)
    except youtube_dl.utils.DownloadError as err:
        raise gr.Error(str(err))

    file_length = info["duration_string"]
    file_length_s = format_string(file_length)

    if file_length_s > YT_LENGTH_LIMIT_S:
        yt_length_limit_hms = time.strftime("%H:%M:%S", time.gmtime(YT_LENGTH_LIMIT_S))
        file_length_hms = time.strftime("%H:%M:%S", time.gmtime(file_length_s))
        raise gr.Error(
            f"To encourage fair usage of the demo, the maximum YouTube video length is {yt_length_limit_hms}, "
            f"but got a video of length {file_length_hms}."
        )

    # prefer the smallest video stream paired with the best m4a audio: only the
    # audio track is used downstream
    ydl_opts = {"outtmpl": filename, "format": "worstvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best"}
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        try:
            ydl.download([yt_url])
        except youtube_dl.utils.ExtractorError as err:
            raise gr.Error(str(err))
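

# Alignment strategy: the Whisper transcription arrives as timestamped text
# chunks, and the diarization as speaker-labelled time segments. Consecutive
# diarization segments with the same speaker are first merged; then, for each
# merged segment, the transcription chunk whose end time is closest to the
# segment's end time marks the cut-off, and all chunks up to it are attributed
# to that speaker.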
def align(transcription, segments, group_by_speaker=True):
    # split the transcription into timestamped chunks of the form "[MM:SS(.fff) -> MM:SS(.fff)] text"
    transcription_split = transcription.split("\n")

    transcript = []
    for chunk in transcription_split:
        start_end, text = chunk[1:].split("] ")
        start, end = start_end.split("->")

        transcript.append({"timestamp": (format_string(start), format_string(end)), "text": text})

    # merge consecutive diarization segments that share the same speaker label
    new_segments = []
    prev_segment = cur_segment = segments[0]

    for i in range(1, len(segments)):
        cur_segment = segments[i]

        # check whether the speaker ("label") has changed
        if cur_segment["label"] != prev_segment["label"]:
            # add the start/end times for the merged super-segment to the new list
            new_segments.append(
                {
                    "segment": {"start": prev_segment["segment"]["start"], "end": cur_segment["segment"]["start"]},
                    "speaker": prev_segment["label"],
                }
            )
            prev_segment = segments[i]

    # add the final (possibly merged) segment
    new_segments.append(
        {
            "segment": {"start": prev_segment["segment"]["start"], "end": cur_segment["segment"]["end"]},
            "speaker": prev_segment["label"],
        }
    )

    # align the diarizer timestamps with the ASR timestamps
    end_timestamps = np.array([chunk["timestamp"][-1] for chunk in transcript])
    segmented_preds = []

    for segment in new_segments:
        # get the diarizer end timestamp
        end_time = segment["segment"]["end"]

        # find the ASR chunk whose end time is closest to the diarizer's end time
        upto_idx = np.argmin(np.abs(end_timestamps - end_time))

        if group_by_speaker:
            segmented_preds.append(
                {
                    "speaker": segment["speaker"],
                    "text": "".join([chunk["text"] for chunk in transcript[: upto_idx + 1]]),
                    "timestamp": (transcript[0]["timestamp"][0], transcript[upto_idx]["timestamp"][1]),
                }
            )
        else:
            for i in range(upto_idx + 1):
                segmented_preds.append({"speaker": segment["speaker"], **transcript[i]})

        # crop the transcript and timestamps to the chunks not yet attributed
        transcript = transcript[upto_idx + 1 :]
        end_timestamps = end_timestamps[upto_idx + 1 :]

    # final post-processing: format the speaker-segmented chunks as a string
    transcription = format_as_transcription(segmented_preds)
    return transcription
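
# Minimal illustration with toy inputs (hypothetical values):
#     align(
#         "[00:00 -> 00:02]  Hello.\n[00:02 -> 00:04]  Hi there.",
#         [
#             {"segment": {"start": 0.0, "end": 2.0}, "label": "SPEAKER_00"},
#             {"segment": {"start": 2.0, "end": 4.0}, "label": "SPEAKER_01"},
#         ],
#     )
# returns "SPEAKER_00 [00:00.000 -> 00:02.000]  Hello.\n\nSPEAKER_01 [00:02.000 -> 00:04.000]  Hi there."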


def transcribe(audio_path, task="transcribe", group_by_speaker=True, progress=gr.Progress()):
    # submit the transcription job to the Whisper JAX endpoint: it runs
    # asynchronously, so the diarization below executes in parallel
    job = client.submit(
        audio_path,
        task,
        True,
        api_name="/predict_1",
    )

    # run diarization locally while we wait for the transcription
    progress(0, desc="Diarizing...")
    diarization = diarization_pipeline(audio_path)
    segments = diarization.for_json()["content"]

    # block until the transcription result is ready
    progress(0.33, desc="Transcribing...")
    transcription, _ = job.result()

    # align the ASR chunks with the diarized speaker segments
    progress(0.66, desc="Aligning...")
    transcription = align(transcription, segments, group_by_speaker=group_by_speaker)

    return transcription
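
# Quick sanity check from within the Gradio app context (hypothetical file path):
#     print(transcribe("meeting.wav", task="transcribe", group_by_speaker=True))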


def transcribe_yt(yt_url, task="transcribe", group_by_speaker=True, progress=gr.Progress()):
    # submit the YouTube URL to the Whisper JAX endpoint, which downloads and
    # transcribes it remotely while we download and diarize locally
    job = client.submit(
        yt_url,
        task,
        True,
        api_name="/predict_2",
    )

    html_embed_str = _return_yt_html_embed(yt_url)
    progress(0, desc="Downloading YouTube video...")
    with tempfile.TemporaryDirectory() as tmpdirname:
        filepath = os.path.join(tmpdirname, "video.mp4")
        download_yt_audio(yt_url, filepath)
        with open(filepath, "rb") as f:
            inputs = f.read()

    # decode the raw bytes to a 16 kHz waveform and add a channel dimension for pyannote
    inputs = ffmpeg_read(inputs, SAMPLING_RATE)
    inputs = torch.from_numpy(inputs).float()
    inputs = inputs.unsqueeze(0)

    progress(0.25, desc="Diarizing...")
    diarization = diarization_pipeline(
        {"waveform": inputs, "sample_rate": SAMPLING_RATE},
    )
    segments = diarization.for_json()["content"]

    # block until the transcription result is ready
    progress(0.50, desc="Transcribing...")
    _, transcription, _ = job.result()

    progress(0.75, desc="Aligning...")
    transcription = align(transcription, segments, group_by_speaker=group_by_speaker)

    return html_embed_str, transcription
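
# e.g. html, text = transcribe_yt("https://www.youtube.com/watch?v=m8u-18Q0s7I")
# (one of the example URLs below, shown for illustration only)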


title = "Whisper JAX + Speaker Diarization ⚡️"

description = """Combine the speed of Whisper JAX with pyannote speaker diarization to transcribe meetings in super-fast time. The demo uses Whisper JAX as an [endpoint](https://twitter.com/sanchitgandhi99/status/1656665496463495168) and pyannote speaker diarization running locally. The Whisper JAX endpoint is run asynchronously, meaning speaker diarization runs in parallel with the speech transcription. The diarized timestamps are aligned with the Whisper output to give the final speaker-segmented transcription.

To duplicate the demo, first accept the pyannote terms of use for the [speaker diarization](https://huggingface.co/pyannote/speaker-diarization) and [segmentation](https://huggingface.co/pyannote/segmentation) models. Then, click [here](https://huggingface.co/spaces/sanchit-gandhi/whisper-jax-diarization?duplicate=true) to duplicate the demo, and enter your Hugging Face access token as a Space secret when prompted.
"""

article = "Whisper large-v2 model by OpenAI. Speaker diarization model by pyannote. Whisper JAX backend running JAX on a TPU v4-8 through the generous support of the [TRC](https://sites.research.google/trc/about/) programme. Whisper JAX [code](https://github.com/sanchit-gandhi/whisper-jax) and Gradio demo by 🤗 Hugging Face."


microphone = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.inputs.Audio(source="microphone", optional=True, type="filepath"),
        gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"),
        gr.inputs.Checkbox(default=True, label="Group by speaker"),
    ],
    outputs=[
        gr.outputs.Textbox(label="Transcription").style(show_copy_button=True),
    ],
    allow_flagging="never",
    title=title,
    description=description,
    article=article,
)

audio_file = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.inputs.Audio(source="upload", optional=True, label="Audio file", type="filepath"),
        gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"),
        gr.inputs.Checkbox(default=True, label="Group by speaker"),
    ],
    outputs=[
        gr.outputs.Textbox(label="Transcription").style(show_copy_button=True),
    ],
    allow_flagging="never",
    title=title,
    description=description,
    article=article,
)

youtube = gr.Interface(
    fn=transcribe_yt,
    inputs=[
        gr.inputs.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL"),
        gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"),
        gr.inputs.Checkbox(default=True, label="Group by speaker"),
    ],
    outputs=[
        gr.outputs.HTML(label="Video"),
        gr.outputs.Textbox(label="Transcription").style(show_copy_button=True),
    ],
    allow_flagging="never",
    title=title,
    examples=[
        ["https://www.youtube.com/watch?v=m8u-18Q0s7I", "transcribe", True],
        ["https://www.youtube.com/watch?v=LCOe3a9EHJs", "transcribe", True],
    ],
    cache_examples=False,
    description=description,
    article=article,
)

demo = gr.Blocks()

with demo:
    gr.TabbedInterface([microphone, audio_file, youtube], ["Microphone", "Audio File", "YouTube"])

# queue requests so that concurrent users don't overload the endpoint
demo.queue(max_size=10)
demo.launch()