import gradio as gr
import torch
from transformers import WhisperProcessor, WhisperForConditionalGeneration
import soundfile as sf
import numpy as np
from scipy import signal
import os
# Set up directories
cache_dir = "/data/cache"
flagged_dir = "/data/flagged"
os.makedirs(cache_dir, exist_ok=True)
os.makedirs(flagged_dir, exist_ok=True)
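# Load the Whisper large-v3 processor and model once at startup, caching weights under /data/cache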
processor = WhisperProcessor.from_pretrained("openai/whisper-large-v3", cache_dir=cache_dir)
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v3", cache_dir=cache_dir)
def process_audio(audio_path):
    # Read the recording (Gradio passes a temporary file path)
    waveform, sr = sf.read(audio_path)
    # Downmix stereo/multi-channel audio to mono
    if len(waveform.shape) > 1:
        waveform = waveform.mean(axis=1)
    # Resample to the 16 kHz rate Whisper expects
    if sr != 16000:
        num_samples = int(len(waveform) * 16000 / sr)
        waveform = signal.resample(waveform, num_samples)
    inputs = processor(waveform, sampling_rate=16000, return_tensors="pt")
    # Force Macedonian transcription
    predicted_ids = model.generate(**inputs, language="mk")
    return processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
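# Bilingual (Macedonian/English) interface with microphone or file upload and manual flagging for feedback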
demo = gr.Interface(
    fn=process_audio,
    inputs=gr.Audio(sources=["microphone", "upload"], type="filepath"),
    outputs="text",
    title="Македонско препознавање на говор / Macedonian Speech Recognition",
    description="Качете аудио или користете микрофон за транскрипција на македонски говор / Upload audio or use microphone to transcribe Macedonian speech",
    flagging_dir=flagged_dir,
    allow_flagging="manual",
    flagging_options=["Incorrect Transcription", "Good Transcription"]
)
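# Listen on all interfaces on port 7860, the default port expected by Hugging Face Spaces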
demo.launch(server_name="0.0.0.0", server_port=7860)