|
import gradio as gr |
|
|
|
|
|
# Display-name -> Hugging Face model id for the two ASR models being compared.
# NOTE(review): "whisper-small.en" is the English-only Whisper checkpoint —
# confirm this is intended, since the UI text is in Arabic.
models = {

    "Whisper Small": "openai/whisper-small.en",

    "Wav2Vec2": "facebook/wav2vec2-base-960h"

}
|
|
|
|
|
# `gr.Interface.load` was deprecated in Gradio 3.x and removed in 4.x;
# `gr.load` is the supported replacement and returns the same callable
# interface object. Prefer it when available, fall back for old installs.
_load_model = gr.load if hasattr(gr, "load") else gr.Interface.load

# Callable proxies to the hosted Hugging Face inference endpoints:
# calling them with an audio file path returns the transcription text.
whisper = _load_model(f"huggingface/{models['Whisper Small']}")

wav2vec = _load_model(f"huggingface/{models['Wav2Vec2']}")
|
|
|
|
|
def transcribe_with_all(audio_path):
    """Transcribe one audio file with both loaded ASR models.

    Parameters
    ----------
    audio_path : str
        Filesystem path to the audio file (Gradio passes this because the
        Audio input uses type="filepath").

    Returns
    -------
    tuple
        (Whisper transcription, Wav2Vec2 transcription), in that order,
        matching the two output textboxes wired up in the UI.
    """
    return whisper(audio_path), wav2vec(audio_path)
|
|
|
|
|
# Build the comparison UI: one audio input, one button, and a side-by-side
# pair of textboxes showing each model's transcription of the same file.
with gr.Blocks() as demo:

    gr.Markdown("# مقارنة بين نماذج التعرف على الصوت")

    gr.Markdown("قارن بين نموذج Whisper و Wav2Vec2")

    # type="filepath" means the handler receives a path string, not raw audio.
    audio_in = gr.Audio(type="filepath", label="ملف صوتي")

    run_btn = gr.Button("تحويل النص")

    # Two columns so the model outputs render side by side for comparison.
    with gr.Row():

        with gr.Column():
            gr.Markdown("### Whisper Small (OpenAI)")
            whisper_box = gr.Textbox(label="نتيجة Whisper")

        with gr.Column():
            gr.Markdown("### Wav2Vec2 (Facebook)")
            wav2vec_box = gr.Textbox(label="نتيجة Wav2Vec2")

    # One click fans the single audio input out to both models; the returned
    # tuple fills the two textboxes in order.
    run_btn.click(transcribe_with_all, audio_in, [whisper_box, wav2vec_box])

# share=True asks Gradio to create a temporary public URL for the demo.
demo.launch(share=True)
|
|