import os
import tempfile

import gradio as gr
from pydub import AudioSegment
from transformers import pipeline
from pyannote.audio import Pipeline
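# Note (environment assumption): pydub relies on ffmpeg (or libav) being installed
# on the system to decode non-wav uploads; this is not enforced anywhere in the code.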
# Load models dynamically
def load_models(model_size):
    if model_size == "transcriber":
        model_name = "clinifyemr/yoruba-model-finetuned"
        transcriber = pipeline("automatic-speech-recognition", model=model_name)
        return transcriber
    else:
        raise ValueError("Model size not supported in this application.")
# Process the audio file: convert to wav, diarize, then transcribe each speaker turn
def process_audio(file, num_speakers, model_size):
    # `file` is a filepath string (the Audio input below uses type="filepath")
    audio_file = AudioSegment.from_file(file)
    transcriber = load_models(model_size)

    # Export a temporary wav copy for the diarization pipeline
    temp_path = tempfile.mktemp(suffix=".wav")
    audio_file.export(temp_path, format="wav")

    # Load the diarization pipeline; the access token is read from the
    # HF_TOKEN environment variable (e.g. a Space secret), not passed as a literal string
    diarization_pipeline = Pipeline.from_pretrained(
        "pyannote/speaker-diarization-3.1",
        use_auth_token=os.environ.get("HF_TOKEN"),
    )
    diarization = diarization_pipeline(temp_path, min_speakers=num_speakers, max_speakers=5)
    # Transcribe a single diarized segment
    def transcribe_segment(start, end):
        segment_audio = audio_file[int(start * 1000):int(end * 1000)]  # pydub works in milliseconds
        segment_path = tempfile.mktemp(suffix=".wav")
        segment_audio.export(segment_path, format="wav")
        transcription = transcriber(segment_path)
        os.remove(segment_path)
        return transcription["text"]
    transcripts = []
    for segment, _, speaker in diarization.itertracks(yield_label=True):
        transcription_text = transcribe_segment(segment.start, segment.end)
        transcripts.append(f"Speaker {speaker}: {transcription_text}")

    os.remove(temp_path)  # Clean up the temporary file
    return "\n".join(transcripts)
# Gradio interface setup | |
iface = gr.Interface( | |
fn=process_audio, | |
inputs=[ | |
#gr.components.Audio(label="Upload your audio file", type="file"), | |
gr.components.Audio(label="Upload your audio file"), | |
gr.components.Dropdown(choices=[1,2,3,4], label="Number of Speakers"), | |
gr.components.Dropdown(choices=['transcriber'], label="Model Selection") # Assuming only 'transcriber' is relevant here | |
], | |
outputs=gr.Textbox(label="Transcription"), | |
title="Audio Transcription and Speaker Diarization", | |
description="Upload your audio file to transcribe and analyze speaker diarization." | |
) | |
if __name__ == "__main__":
    iface.launch()
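# Assumed local setup: export HF_TOKEN before starting the app with `python app.py`,
# since pyannote/speaker-diarization-3.1 is a gated model that requires an accepted
# license and an access token.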