from nemo.collections.asr.models import NeuralDiarizer
import gradio as gr
import torch
import pandas as pd

# Load the MSDD telephonic diarization model once at startup, on GPU when available.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = NeuralDiarizer.from_pretrained("diar_msdd_telephonic").to(device)


def run_diarization(path1):
    # Diarize the input audio and serialize the result as RTTM text.
    annotation = model(path1)
    rttm = annotation.to_rttm()

    # Convert each RTTM segment into a (start_time, end_time, speaker) row.
    df = pd.DataFrame(columns=['start_time', 'end_time', 'speaker'])
    for idx, line in enumerate(rttm.splitlines()):
        split = line.split()
        start_time, duration, speaker = split[3], split[4], split[7]
        end_time = float(start_time) + float(duration)
        df.loc[idx] = start_time, end_time, speaker
    return df
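

# Illustrative only (not output from an actual run): a SPEAKER record in the
# RTTM text produced above typically looks like
#   SPEAKER <file-id> 1 <start> <duration> <NA> <NA> <speaker-label> <NA> <NA>
# so fields 3, 4 and 7 of the whitespace split are the start time, duration and
# speaker label that run_diarization() maps to start/end/speaker rows.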

inputs = [
    # Audio is recorded from the microphone and passed to the model as a file path.
    gr.inputs.Audio(source="microphone", type="filepath", optional=True, label="Input Audio"),
]
output = gr.outputs.Dataframe()
description = (
    "This demo performs offline speaker diarization on an audio file using NVIDIA NeMo."
)
article = (
    "<p style='text-align: center'>"
    "<a href='https://huggingface.co/nvidia/speakerverification_en_titanet_large' target='_blank'>🎙️ Learn more about TitaNet model</a> | "
    "<a href='https://arxiv.org/pdf/2110.04410.pdf' target='_blank'>📚 TitaNet paper</a> | "
    "<a href='https://github.com/NVIDIA/NeMo' target='_blank'>🧑‍💻 Repository</a>"
    "</p>"
)
examples = [
    ["data/sample_interview_conversation.wav"],
    ["data/id10270_5r0dWxy17C8-00001.wav"],
]
interface = gr.Interface(
    fn=run_diarization,
    inputs=inputs,
    outputs=output,
    title="Offline Speaker Diarization with NeMo",
    description=description,
    article=article,
    layout="horizontal",
    theme="huggingface",
    allow_flagging=False,
    live=False,
    examples=examples,
)
interface.launch(enable_queue=True)
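
# Minimal sketch of calling the diarizer directly, without the Gradio UI
# (assumes one of the bundled example files exists locally; not part of the
# original app):
#   df = run_diarization("data/sample_interview_conversation.wav")
#   print(df)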