|
import pyarrow as pa |
|
import whisper |
|
from pynput import keyboard |
|
from pynput.keyboard import Key |
|
from dora import DoraStatus |
|
|
|
import numpy as np |
|
import pyarrow as pa |
|
import sounddevice as sd |
|
|
|
# Load the Whisper model once at import time so every event reuses the
# same weights instead of paying the load cost per transcription.
model = whisper.load_model("base")


# 16 kHz mono — the sample rate Whisper's preprocessing expects (the raw
# recording below is resampled/normalized to this rate at capture time).
SAMPLE_RATE = 16000

# Upper bound, in seconds, on a single blocking recording per keypress.
MAX_DURATION = 20
|
|
|
|
|
class Operator:
    """
    Transforming Speech to Text using OpenAI Whisper model.

    On each dora INPUT event, polls the keyboard for up to one second; when
    the Up-arrow key is pressed, records up to ``MAX_DURATION`` seconds of
    16 kHz mono audio, transcribes it with the module-level Whisper model,
    and emits the text (plus LED status colors) via ``send_output``.
    """

    def on_event(
        self,
        dora_event,
        send_output,
    ) -> DoraStatus:
        """Handle one dora event.

        Parameters
        ----------
        dora_event:
            Event dict; only ``type == "INPUT"`` is acted on. Its
            ``"metadata"`` entry is forwarded with the transcription.
        send_output:
            Callable ``(output_id, pa.Array, metadata=...)`` used to emit
            the ``"text"`` result and ``"led"`` status colors.

        Returns
        -------
        DoraStatus
            Always ``DoraStatus.CONTINUE`` so the operator keeps running.
        """
        if dora_event["type"] == "INPUT":

            # Poll the keyboard for at most 1 second; only an Up-arrow
            # press triggers a recording.
            with keyboard.Events() as events:
                event = events.get(1.0)
                if event is not None and event.key == Key.up:
                    # Green LED: recording in progress.
                    send_output("led", pa.array([0, 255, 0]))

                    # Blocking capture of MAX_DURATION seconds of mono
                    # int16 audio at Whisper's expected sample rate.
                    audio_data = sd.rec(
                        int(SAMPLE_RATE * MAX_DURATION),
                        samplerate=SAMPLE_RATE,
                        channels=1,
                        dtype=np.int16,
                        blocking=True,
                    )

                    # Flatten and normalize int16 PCM to float32 in [-1, 1).
                    audio = audio_data.ravel().astype(np.float32) / 32768.0

                    # Whisper requires a fixed-length (30 s) input window.
                    audio = whisper.pad_or_trim(audio)
                    result = model.transcribe(audio, language="en")
                    send_output(
                        "text", pa.array([result["text"]]), dora_event["metadata"]
                    )
                    # Blue LED: idle / transcription delivered.
                    send_output("led", pa.array([0, 0, 255]))
                    # BUG FIX: the original code did `del model` followed by
                    # `gc.collect()` here. Deleting the global model after the
                    # first transcription made every subsequent Key.up press
                    # raise NameError inside `model.transcribe`, even though
                    # the operator returns CONTINUE and keeps receiving
                    # events. The model must stay alive for the operator's
                    # whole lifetime.

        return DoraStatus.CONTINUE
|
|