File size: 1,680 Bytes
034b730 ffc2aa4 034b730 ffc2aa4 034b730 ffc2aa4 8852f54 ffc2aa4 034b730 8852f54 034b730 ffc2aa4 8852f54 ffc2aa4 8852f54 034b730 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 |
import pyarrow as pa
import whisper
from pynput import keyboard
from pynput.keyboard import Key
from dora import DoraStatus
import numpy as np
import pyarrow as pa
import sounddevice as sd
# Load the Whisper "base" checkpoint once at import time so every event
# reuses the same in-memory model.
model = whisper.load_model("base")
# Whisper models expect 16 kHz mono audio.
SAMPLE_RATE = 16000
# Length of each microphone recording, in seconds.
MAX_DURATION = 20
class Operator:
    """
    Transforming Speech to Text using OpenAI Whisper model
    """

    def on_event(
        self,
        dora_event,
        send_output,
    ) -> DoraStatus:
        """Handle one dora event.

        On an "INPUT" event: poll the keyboard briefly (an Up-arrow press
        turns the LED green), record MAX_DURATION seconds of audio from the
        microphone, transcribe it with the module-level Whisper model, and
        emit the recognized text followed by a blue-LED signal.

        Parameters:
            dora_event: mapping with at least "type" and "metadata" keys.
            send_output: callable(output_id, pyarrow array, metadata) used
                to emit results downstream.

        Returns:
            DoraStatus.CONTINUE so the operator keeps receiving events.
        """
        if dora_event["type"] == "INPUT":
            # Poll the keyboard for up to 1 second; an Up-arrow press
            # flashes the LED green before recording starts.
            with keyboard.Events() as events:
                event = events.get(1.0)
                if event is not None and event.key == Key.up:
                    send_output("led", pa.array([0, 255, 0]))

            # Record MAX_DURATION seconds of 16 kHz mono audio,
            # blocking until the buffer is full.
            audio_data = sd.rec(
                int(SAMPLE_RATE * MAX_DURATION),
                samplerate=SAMPLE_RATE,
                channels=1,
                dtype=np.int16,
                blocking=True,
            )
            # Convert int16 PCM samples to float32 in [-1.0, 1.0),
            # the range Whisper expects.
            audio = audio_data.ravel().astype(np.float32) / 32768.0

            # Pad/trim to Whisper's fixed 30-second window, then transcribe.
            audio = whisper.pad_or_trim(audio)
            result = model.transcribe(audio, language="en")
            send_output(
                "text", pa.array([result["text"]]), dora_event["metadata"]
            )
            # Blue LED signals that transcription is done.
            send_output("led", pa.array([0, 0, 255]))
            # BUG FIX: the original did `del model` + gc.collect() here,
            # which destroys the module-level model and makes every
            # subsequent INPUT event raise NameError. The model is kept
            # loaded so the operator can handle more than one event.
        return DoraStatus.CONTINUE
|