import gradio as gr
import torch
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
import numpy as np
from pyannote.audio import Pipeline
from dotenv import load_dotenv
import os

load_dotenv()

# Check and set device
device = "cuda:0" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

# Model and pipeline setup
model_id = "distil-whisper/distil-small.en"
model = AutoModelForSpeechSeq2Seq.from_pretrained(
    model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
)
model.to(device)

processor = AutoProcessor.from_pretrained(model_id)

pipe = pipeline(
    "automatic-speech-recognition",
    model=model,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    max_new_tokens=128,
    torch_dtype=torch_dtype,
    device=device,
)

# Diarization pipeline (renamed to avoid conflict with the transformers pipeline)
diarization_pipeline = Pipeline.from_pretrained(
    "pyannote/speaker-diarization-3.0", use_auth_token=os.getenv("HF_KEY")
)


def transcribe(audio):
    sr, data = audio
    # Gradio delivers int16 PCM samples; normalize to float32 in [-1, 1]
    processed_data = np.array(data).astype(np.float32) / 32767.0
    waveform_tensor = torch.tensor(processed_data[np.newaxis, :])

    # Results from the ASR and diarization pipelines
    transcription_res = pipe({"sampling_rate": sr, "raw": processed_data})["text"]
    diarization_res = diarization_pipeline(
        {"waveform": waveform_tensor, "sample_rate": sr}
    )
    return transcription_res, diarization_res


demo = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(sources=["upload", "microphone"]),
    outputs=[
        gr.Textbox(lines=3, info="audio transcription"),
        gr.Textbox(info="speaker diarization"),
    ],
    title="Automatic Speech Recognition 🗣️",
    description="Transcribe your speech to text with distilled whisper",
)

if __name__ == "__main__":
    demo.launch()
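

# The diarization Textbox above will display the raw repr of the pyannote
# Annotation object returned by diarization_pipeline. If you prefer
# speaker-labelled time ranges, a helper along these lines could be called
# on diarization_res inside transcribe() before returning. This is a minimal
# sketch: the name format_diarization is not part of either library, and it
# only assumes the standard pyannote.core Annotation API (itertracks).
def format_diarization(diarization_res):
    # itertracks(yield_label=True) yields (segment, track, speaker) triples
    lines = []
    for segment, _, speaker in diarization_res.itertracks(yield_label=True):
        lines.append(f"{segment.start:.1f}s - {segment.end:.1f}s: {speaker}")
    return "\n".join(lines)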