haixuantao committed
Commit: 9b97c01
Parent: 533dd7a

Small update

.gitignore CHANGED
@@ -4,7 +4,8 @@ operators/__pycache__/
 __pycache__/
 *.avi
 *.txt
-
+*.wav
+*.mp3
 
 ## TODO:
 - [ ] Make human direct using voice
graphs/dataflow_vlm_basic.yml CHANGED
@@ -30,6 +30,7 @@ nodes:
       args: ../operators/robot_minimize.py
       inputs:
         control: idefics2/control
+        led: reload/led
 
   - id: webcam
     custom:
@@ -51,4 +52,12 @@ nodes:
       inputs:
        text:
          source: idefics2/speak
-          queue_size: 1
+          queue_size: 1
+
+  - id: reload
+    operator:
+      python: ../operators/reload.py
+      inputs:
+        image: dora/timer/millis/500
+      outputs:
+        - led
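Note on the wiring above: the new reload node is driven by the built-in dora/timer/millis/500 source, so it receives a tick every 500 ms under the input id image, and its led output is now routed into the robot node. As a minimal sketch (assuming the standalone dora Node API; whether robot_minimize.py consumes it this way is not shown in this diff), a node that reads this led input could look like the following, with print standing in for whatever LED call the robot actually exposes:

from dora import Node

node = Node()

for event in node:
    if event["type"] == "INPUT" and event["id"] == "led":
        # reload.py emits a flat [R, G, B] pyarrow array on every timer tick.
        r, g, b = event["value"].to_pylist()
        print(f"LED -> ({r}, {g}, {b})")  # stand-in for the robot's real LED call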
operators/reload.py ADDED
@@ -0,0 +1,18 @@
+from dora import DoraStatus
+import pyarrow as pa
+
+
+class Operator:
+    def __init__(self):
+        self.image = None
+        self.text = None
+
+    def on_event(
+        self,
+        dora_event,
+        send_output,
+    ) -> DoraStatus:
+        if dora_event["type"] == "INPUT":
+            send_output("led", pa.array([255, 0, 0]))
+            pass
+        return DoraStatus.CONTINUE
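As wired in the graph above, this operator fires every 500 ms and currently always emits the constant [255, 0, 0] value on led. A quick way to sanity-check it outside dora is to drive on_event by hand with a fake event dict; the dict fields below mirror what dora passes in and are an assumption for local testing only:

# Run from the operators/ directory so the import resolves.
from reload import Operator

collected = []
op = Operator()
status = op.on_event(
    {"type": "INPUT", "id": "image", "value": None, "metadata": {}},
    lambda output_id, data, metadata=None: collected.append((output_id, data.to_pylist())),
)
print(status, collected)  # DoraStatus.CONTINUE [('led', [255, 0, 0])]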
operators/whisper_op.py CHANGED
@@ -1,8 +1,8 @@
 import pyarrow as pa
 import whisper
 from pynput import keyboard
-from pynput.keyboard import Key
-from dora import DoraStatus
+from pynput.keyboard import Key, Events
+from dora import Node
 
 import torch
 import numpy as np
@@ -13,57 +13,47 @@ import gc # garbage collect library
 model = whisper.load_model("base")
 
 SAMPLE_RATE = 16000
-MAX_DURATION = 10
-MIN_DURATION = 6
-
-
-class Operator:
-    """
-    Transforming Speech to Text using OpenAI Whisper model
-    """
-
-    def __init__(self) -> None:
-        self.policy_init = False
-
-    def on_event(
-        self,
-        dora_event,
-        send_output,
-    ) -> DoraStatus:
-        global model
-        if dora_event["type"] == "INPUT":
-            ## Check for keyboard event
-            with keyboard.Events() as events:
-                event = events.get(1.0)
-                if event is not None and event.key == Key.up:
-                    # send_output("led", pa.array([0, 255, 0]))
-
-                    if self.policy_init == False:
-                        self.policy_init = True
-                        duration = MAX_DURATION
-                    else:
-                        duration = MIN_DURATION
-
-                    ## Microphone
-                    audio_data = sd.rec(
-                        int(SAMPLE_RATE * duration),
-                        samplerate=SAMPLE_RATE,
-                        channels=1,
-                        dtype=np.int16,
-                        blocking=True,
-                    )
-
-                    audio = audio_data.ravel().astype(np.float32) / 32768.0
-
-                    ## Speech to text
-                    audio = whisper.pad_or_trim(audio)
-                    result = model.transcribe(audio, language="en")
-                    send_output(
-                        "text", pa.array([result["text"]]), dora_event["metadata"]
-                    )
-                    # send_output("led", pa.array([0, 0, 255]))
-
-                    gc.collect()
-                    torch.cuda.empty_cache()
-
-        return DoraStatus.CONTINUE
+MAX_DURATION = 30
+
+policy_init = True
+
+node = Node()
+
+for dora_event in node:
+    if dora_event["type"] == "INPUT":
+        ## Check for keyboard event
+        with keyboard.Events() as events:
+            event = events.get(1.0)
+            if (
+                event is not None
+                and event.key == Key.alt_r
+                and isinstance(event, Events.Press)
+            ):
+
+                ## Microphone
+                audio_data = sd.rec(
+                    int(SAMPLE_RATE * MAX_DURATION),
+                    samplerate=SAMPLE_RATE,
+                    channels=1,
+                    dtype=np.int16,
+                    blocking=False,
+                )
+
+            elif (
+                event is not None
+                and event.key == Key.alt_r
+                and isinstance(event, Events.Release)
+            ):
+                sd.stop()
+                audio = audio_data.ravel().astype(np.float32) / 32768.0
+
+                ## Speech to text
+                audio = whisper.pad_or_trim(audio)
+                result = model.transcribe(audio, language="en")
+                node.send_output(
+                    "text", pa.array([result["text"]]), dora_event["metadata"]
+                )
+                # send_output("led", pa.array([0, 0, 255]))
+
+                gc.collect()
+                torch.cuda.empty_cache()
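This change turns whisper_op.py from an on_event operator into a standalone dora node with push-to-talk behavior: pressing right Alt starts a non-blocking recording of up to 30 s, and releasing it stops the stream and transcribes whatever was captured (sounddevice and gc are imported in unchanged lines elided from the hunk). Below is a minimal sketch of the same record/stop pattern in isolation, with the keyboard handling removed so it can be run directly; the 2-second sleep is an assumption standing in for the time between key press and release:

import time

import numpy as np
import sounddevice as sd
import whisper

SAMPLE_RATE = 16000
MAX_DURATION = 30

model = whisper.load_model("base")

# Start a non-blocking recording into a buffer sized for the maximum
# duration; sd.rec returns immediately when blocking=False.
audio_data = sd.rec(
    int(SAMPLE_RATE * MAX_DURATION),
    samplerate=SAMPLE_RATE,
    channels=1,
    dtype=np.int16,
    blocking=False,
)

time.sleep(2)  # stand-in for holding the key down
sd.stop()  # stop early; the captured samples sit at the start of the buffer

# Same normalization and transcription steps as in the diff. Note that the
# tail of the buffer past the stop point is not recorded audio, which
# mirrors the behavior of the committed code.
audio = audio_data.ravel().astype(np.float32) / 32768.0
audio = whisper.pad_or_trim(audio)
print(model.transcribe(audio, language="en")["text"])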