import pyarrow as pa
import whisper

from dora import DoraStatus

# Load the Whisper model once at import time so every event reuses it.
model = whisper.load_model("base")


class Operator:
    """
    Transforming Speech to Text using OpenAI Whisper model
    """

    def on_event(
        self,
        dora_event,
        send_output,
    ) -> DoraStatus:
        if dora_event["type"] == "INPUT":
            # Incoming audio arrives as a pyarrow array; convert to numpy for Whisper.
            audio = dora_event["value"].to_numpy()
            # Pad or trim the buffer to the fixed window length Whisper expects.
            audio = whisper.pad_or_trim(audio)
            result = model.transcribe(audio, language="en")
            # Forward the transcription downstream on the "text" output.
            send_output("text", pa.array([result["text"]]), dora_event["metadata"])
        return DoraStatus.CONTINUE
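

# Minimal usage sketch (not part of the original file): exercises the event
# contract directly with one second of silence, assuming 16 kHz mono float32
# audio and a hypothetical print-based stand-in for Dora's send_output callback.
# The guard keeps it from running when Dora imports this module as an operator.
if __name__ == "__main__":
    import numpy as np

    def _print_output(output_id, value, metadata):
        # Stand-in for send_output: just print the transcribed text.
        print(output_id, value[0].as_py())

    silence = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
    event = {"type": "INPUT", "value": pa.array(silence), "metadata": {}}
    Operator().on_event(event, _print_output)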