import gradio as gr
import librosa
from transformers import WhisperProcessor, WhisperForConditionalGeneration

processor = WhisperProcessor.from_pretrained("distil-whisper/distil-large-v2")
model = WhisperForConditionalGeneration.from_pretrained("distil-whisper/distil-large-v2")

def transcrire_audio(audio_path, prompt):
    # Load the recording and resample it to the 16 kHz rate the Whisper
    # feature extractor expects (librosa must be listed in requirements.txt).
    audio, sampling_rate = librosa.load(audio_path, sr=16000)
    input_features = processor(audio, sampling_rate=sampling_rate, return_tensors="pt").input_features

    # Transcription without any prompt.
    output_without_prompt = model.generate(input_features)
    transcription_sans_prompt = processor.batch_decode(output_without_prompt, skip_special_tokens=True)[0]

    # Transcription conditioned on the user-supplied prompt.
    prompt_ids = processor.get_prompt_ids(prompt, return_tensors="pt")
    output_with_prompt = model.generate(input_features, prompt_ids=prompt_ids)
    transcription_avec_prompt = processor.batch_decode(output_with_prompt, skip_special_tokens=True)[0]

    # Return a tuple so the two values map onto the two text outputs of the interface.
    return transcription_sans_prompt, transcription_avec_prompt
iface = gr.Interface(
    fn=transcrire_audio,
    inputs=[gr.Audio(type="filepath"), gr.Textbox(label="Prompt")],
    outputs=[gr.Textbox(label="Transcription without prompt"),
             gr.Textbox(label="Transcription with prompt")],
)

iface.launch()