haixuantao committed on
Commit
533dd7a
1 Parent(s): 293b414

Adding a couple of working nodes

graphs/dataflow_vlm_basic.yml CHANGED
@@ -1,45 +1,54 @@
 nodes:
-  ### Camera
-  - id: plot_bot
-    operator:
-      python: ../operators/plot.py
+  - id: plot
+    custom:
+      source: dora-rerun
     inputs:
       image: webcam/image
-      user_message: whisper/text
-      assistant_message: vlm/assistant_message
-      bbox: object_detection/bbox
+      textlog_whisper: whisper/text
+      textlog_vlm: idefics2/speak
+    envs:
+      IMAGE_WIDTH: 1280
+      IMAGE_HEIGHT: 720
+      IMAGE_DEPTH: 3
+      RERUN_MEMORY_LIMIT: 10%

-  - id: vlm
+  - id: idefics2
     operator:
-      python: ../operators/idefics2_op.py
+      python: ../operators/idefics2_op_demo.py
     inputs:
       image:
         source: webcam/image
         queue_size: 1
-      instruction: whisper/text
+      text: whisper/text
     outputs:
-      - assistant_message
+      - speak
+      - control

-  - id: webcam
-    operator:
-      python: ../operators/webcam.py
+  - id: robot
+    custom:
+      source: /home/peter/miniconda3/envs/robomaster/bin/python
+      args: ../operators/robot_minimize.py
     inputs:
-      tick: dora/timer/millis/50
+      control: idefics2/control
+
+  - id: webcam
+    custom:
+      source: ../operators/opencv_stream.py
     outputs:
       - image

   - id: whisper
-    operator:
-      python: ../operators/whisper_op.py
+    custom:
+      source: ../operators/whisper_op.py
     inputs:
       audio: dora/timer/millis/1000
     outputs:
       - text

-  - id: object_detection
+  - id: parler
     operator:
-      python: ../operators/object_detection.py
+      python: ../operators/parler_op.py
     inputs:
-      image: webcam/image
-    outputs:
-      - bbox
+      text:
+        source: idefics2/speak
+        queue_size: 1
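Note: the `parler` node points at `../operators/parler_op.py`, which is not part of this commit. For orientation, a minimal sketch of the operator shape that node expects, following the same conventions as the other operators in this repo (the print standing in for the actual TTS call is an assumption):

from dora import DoraStatus


class Operator:
    def on_event(self, dora_event, send_output) -> DoraStatus:
        # Receives `text` wired from idefics2/speak in the graph above.
        if dora_event["type"] == "INPUT" and dora_event["id"] == "text":
            text = dora_event["value"][0].as_py()
            print(f"[parler] would speak: {text}", flush=True)  # placeholder for the TTS call
        return DoraStatus.CONTINUE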
graphs/dataflow_vlm_policy.yml ADDED
@@ -0,0 +1,58 @@
nodes:
  - id: plot
    custom:
      source: dora-rerun
    inputs:
      image: webcam/image
      textlog_whisper: whisper/text
    envs:
      IMAGE_WIDTH: 1280
      IMAGE_HEIGHT: 720
      IMAGE_DEPTH: 3
      RERUN_MEMORY_LIMIT: 10%

  - id: policy
    operator:
      python: ../operators/policy.py
    inputs:
      init: llm/init
      reached_kitchen: robot/reached_kitchen
      reached_living_room: robot/reached_living_room
      reached_office: robot/reached_office
    outputs:
      - go_to

  - id: llm
    operator:
      python: ../operators/llm_op.py
    inputs:
      text: whisper/text
    outputs:
      - init

  - id: robot
    custom:
      source: /home/peter/miniconda3/envs/robomaster/bin/python
      args: ../operators/robot_minimize.py
    inputs:
      # control: idefics2/control
      go_to: policy/go_to
    outputs:
      - reached_kitchen
      - reached_living_room
      - reached_office

  - id: webcam
    custom:
      source: ../operators/opencv_stream.py
    outputs:
      - image

  - id: whisper
    custom:
      source: ../operators/whisper_op.py
    inputs:
      audio: dora/timer/millis/1000
    outputs:
      - text
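Note: in this graph, `llm` emits `init` once it has generated `policy.py`, `policy` turns that into `go_to` commands, and `robot` reports `reached_kitchen` / `reached_living_room` / `reached_office` back to `policy`, closing the loop. The committed `policy.py` (see below) still leaves the `reached_*` handlers empty; a hypothetical sketch of how they could chain destinations, not part of this commit:

import pyarrow as pa
from dora import DoraStatus


class Operator:
    def __init__(self):
        # Hypothetical fixed route; the committed policy.py does not do this yet.
        self.route = ["OFFICE", "KITCHEN"]

    def on_event(self, event: dict, send_output) -> DoraStatus:
        if event["type"] == "INPUT" and self.route:
            if event["id"] == "init" or event["id"].startswith("reached_"):
                # Send the next destination each time the robot finishes a leg.
                send_output("go_to", pa.array([self.route.pop(0)]))
        return DoraStatus.CONTINUE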
operators/idefics2_op_demo.py ADDED
@@ -0,0 +1,107 @@
from dora import DoraStatus
import pyarrow as pa
from transformers import AutoProcessor, AutoModelForVision2Seq, AwqConfig
import torch
import gc

CAMERA_WIDTH = 1280
CAMERA_HEIGHT = 720

PROCESSOR = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-tfrm-compatible")
BAD_WORDS_IDS = PROCESSOR.tokenizer(
    ["<image>", "<fake_token_around_image>"], add_special_tokens=False
).input_ids
EOS_WORDS_IDS = PROCESSOR.tokenizer(
    "<end_of_utterance>", add_special_tokens=False
).input_ids + [PROCESSOR.tokenizer.eos_token_id]

# 4-bit AWQ-quantized Idefics2 with fused modules, loaded once onto the GPU.
model = AutoModelForVision2Seq.from_pretrained(
    "HuggingFaceM4/idefics2-tfrm-compatible-AWQ",
    quantization_config=AwqConfig(
        bits=4,
        fuse_max_seq_len=4096,
        modules_to_fuse={
            "attention": ["q_proj", "k_proj", "v_proj", "o_proj"],
            "mlp": ["gate_proj", "up_proj", "down_proj"],
            "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"],
            "use_alibi": False,
            "num_attention_heads": 32,
            "num_key_value_heads": 8,
            "hidden_size": 4096,
        },
    ),
    trust_remote_code=True,
).to("cuda")


def reset_awq_cache(model):
    """
    Simple method to reset the AWQ fused modules cache
    """
    from awq.modules.fused.attn import QuantAttentionFused

    for name, module in model.named_modules():
        if isinstance(module, QuantAttentionFused):
            module.start_pos = 0


def ask_vlm(image, instruction):
    """Run one image + instruction through Idefics2 and return the assistant's reply."""
    global model
    prompts = [
        "User:",
        image,
        f"{instruction}.<end_of_utterance>\n",
        "Assistant:",
    ]
    inputs = {k: torch.tensor(v).to("cuda") for k, v in PROCESSOR(prompts).items()}

    generated_ids = model.generate(
        **inputs,
        bad_words_ids=BAD_WORDS_IDS,
        max_new_tokens=25,
        repetition_penalty=1.2,
    )
    generated_texts = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)
    reset_awq_cache(model)

    gc.collect()
    torch.cuda.empty_cache()
    return generated_texts[0].split("\nAssistant: ")[1]


class Operator:
    def __init__(self):
        self.image = None
        self.text = None

    def on_event(
        self,
        dora_event,
        send_output,
    ) -> DoraStatus:
        if dora_event["type"] == "INPUT":
            if dora_event["id"] == "image":
                # Keep only the latest webcam frame as an HxWx3 array.
                self.image = (
                    dora_event["value"]
                    .to_numpy()
                    .reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
                )
            elif dora_event["id"] == "text":
                # A new transcribed instruction: query the VLM and speak the answer.
                self.text = dora_event["value"][0].as_py()
                output = ask_vlm(self.image, self.text).lower()
                send_output(
                    "speak",
                    pa.array([output]),
                )
                """
                if "sofa" in output:
                    send_output(
                        "control",
                        pa.array([0.0, 0.0, 0.0, 0.0, 0.0, 50.0, 50.0]),
                    )
                elif "back" in self.text:
                    send_output(
                        "control",
                        pa.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
                    )
                """

        return DoraStatus.CONTINUE
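Note: `ask_vlm` can also be exercised outside the dataflow. A quick check, assuming the model above is already downloaded to a CUDA machine, the script is run from the operators/ directory, and `test.jpg` is a local image (all assumptions, not part of this commit):

import numpy as np
from PIL import Image

from idefics2_op_demo import ask_vlm  # loads the quantized model on import

# 1280x720 RGB frame, matching the operator's camera resolution.
frame = np.array(Image.open("test.jpg").convert("RGB").resize((1280, 720)))
print(ask_vlm(frame, "What do you see"))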
operators/llm_op.py CHANGED
@@ -5,6 +5,7 @@ import pyarrow as pa
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
+import gc  # garbage collect library
 import re
 import time
 
@@ -136,6 +137,8 @@ def replace_code_in_source(source_code, replacement_block: str):
 
 
 class Operator:
+    def __init__(self) -> None:
+        self.policy_init = False
 
     def on_event(
         self,
@@ -168,18 +171,13 @@ class Operator:
             print("response: ", output, flush=True)
             with open(path, "w") as file:
                 file.write(source_code)
-            del model
-            del tokenizer
-            # model will still be on cache until its place is taken by other objects so also execute the below lines
-            import gc  # garbage collect library
 
             gc.collect()
             torch.cuda.empty_cache()
-            time.sleep(9)
-            send_output("init", pa.array([]))
-
-            ## Stopping to liberate GPU space
-            return DoraStatus.STOP
+            time.sleep(6)
+            if not self.policy_init:
+                send_output("init", pa.array([]))
+                self.policy_init = True
 
         return DoraStatus.CONTINUE
 
@@ -230,7 +228,7 @@ if __name__ == "__main__":
         [
             {
                 "path": path,
-                "user_message": "go to the living room, ask the model if there is people, if there is, say i'm going to go get coffee for you, then go to the kitchen, when you reach the kitchen, check with the model if there is a person and say can i have a coffee please, then wait 10 sec and go back to the living room",
+                "user_message": "go to the office, and then say I know that you work hard, so I brought you some chocolate, wait for 10 seconds, then play the office song, and then go to the kitchen",
            },
         ]
     ),
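Note: the new `policy_init` flag changes the operator's lifecycle: instead of sleeping and returning `DoraStatus.STOP` after the first generation, it keeps running and emits `init` only once, so later voice commands can rewrite `policy.py` in place without restarting the policy node. A simplified sketch of that write-then-signal-once pattern (names and structure condensed; `path` stands for the policy file the real operator rewrites):

import pyarrow as pa
from dora import DoraStatus


class Operator:
    def __init__(self) -> None:
        self.policy_init = False

    def write_policy(self, source_code: str, path: str, send_output) -> DoraStatus:
        # Persist the newly generated policy code.
        with open(path, "w") as file:
            file.write(source_code)
        if not self.policy_init:
            # Start the policy node only after its first version exists on disk.
            send_output("init", pa.array([]))
            self.policy_init = True
        return DoraStatus.CONTINUE  # keep running for follow-up commands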
operators/policy.py CHANGED
@@ -1,32 +1,29 @@
 import pyarrow as pa
 from dora import DoraStatus
-from utils import ask_vlm, speak
+from utils import speak, play
 from time import sleep
 
 
 class Operator:
     def __init__(self):
-        self.location = ["KITCHEN", "LIVING_ROOM"]
-        pass
-
-    def ask_model(self, image, text: str) -> str:
-        text = ask_vlm(image, text).lower()
-        return text
+        self.location = ["KITCHEN", "OFFICE"]
+        self.music = ["office.mp3"]
 
     def speak(self, text: str):
         speak(text)
 
+    def play(self, file: str):
+        play(file)
+
     def on_event(self, event: dict, send_output) -> DoraStatus:
         if event["type"] == "INPUT":
             id = event["id"]
             # On initialization
             if id == "init":
                 send_output("go_to", pa.array([""]))
-            elif id == "reached_living_room":
-                image = event["value"].to_numpy().reshape((540, 960, 3))
+            elif id == "reached_office":
                 pass
             elif id == "reached_kitchen":
-                image = event["value"].to_numpy().reshape((540, 960, 3))
                 pass
 
         return DoraStatus.CONTINUE
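Note: `policy.py` now imports `speak` and `play` from a local `utils` module that is not included in this diff. A hypothetical sketch of what such helpers could look like; the pyttsx3 and playsound backends are assumptions, not the repo's actual implementation:

import pyttsx3
from playsound import playsound

_engine = pyttsx3.init()


def speak(text: str) -> None:
    # Blocking text-to-speech for short sentences.
    _engine.say(text)
    _engine.runAndWait()


def play(file: str) -> None:
    # Blocking playback of an audio file such as "office.mp3".
    playsound(file)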
operators/robot_minimize.py CHANGED
@@ -1,5 +1,8 @@
 from robomaster import robot
 from time import sleep
+from dora import Node
+import numpy as np
+import pyarrow as pa
 
 
 def wait(event):
@@ -12,19 +15,58 @@ assert ep_robot.initialize(conn_type="ap"), "Could not initialize ep_robot"
 assert ep_robot.camera.start_video_stream(display=False), "Could not start video stream"
 ep_robot.gimbal.recenter().wait_for_completed()
 
-from dora import Node
 
 node = Node()
 
+current_location = "HOME"
+LOCATION = {
+    "HOME": {
+        "KITCHEN": np.array([[0.5, 0.0, 0.0, 0.8, 0.0, 0.0, 0.0]]),
+        "OFFICE": np.array([[0.5, 0.0, 0.0, 0.8, 0.0, 0.0, 0.0]]),
+    },
+    "KITCHEN": {
+        "OFFICE": np.array([[-0.5, 0.0, 0.0, 0.8, 0.0, 0.0, -180.0]]),
+    },
+    "OFFICE": {
+        "KITCHEN": np.array([[-0.5, 0.0, 0.0, 0.8, 0.0, 0.0, -180.0]]),
+    },
+}
+
 for dora_event in node:
-    if dora_event["type"] == "INPUT":
+    if dora_event["type"] == "INPUT" and dora_event["id"] == "control":
         [x, y, z, xy_speed, z_speed, pitch, yaw] = dora_event["value"].to_numpy()
-        print(dora_event["value"].to_numpy())
-        event = ep_robot.gimbal.moveto(
-            pitch=pitch, yaw=yaw, pitch_speed=60.0, yaw_speed=50.0
-        )
-        wait(event)
-        sleep(2)
-        event = ep_robot.chassis.move(x=x, y=y, z=z, xy_speed=xy_speed, z_speed=z_speed)
-        wait(event)
-        sleep(6)
+        print(dora_event["value"].to_numpy(), flush=True)
+
+        if any([pitch, yaw]):
+            event = ep_robot.gimbal.moveto(
+                pitch=pitch, yaw=yaw, pitch_speed=60.0, yaw_speed=50.0
+            )
+            wait(event)
+            sleep(2)
+        if any([x, y, z]):
+            event = ep_robot.chassis.move(
+                x=x, y=y, z=z, xy_speed=xy_speed, z_speed=z_speed
+            )
+            wait(event)
+            sleep(6)
+    if dora_event["type"] == "INPUT" and dora_event["id"] == "go_to":
+        destination = dora_event["value"][0].as_py()
+        commands = LOCATION[current_location][destination]
+        for command in commands:
+
+            [x, y, z, xy_speed, z_speed, pitch, yaw] = command
+
+            if any([pitch, yaw]):
+                event = ep_robot.gimbal.moveto(
+                    pitch=pitch, yaw=yaw, pitch_speed=60.0, yaw_speed=50.0
+                )
+                wait(event)
+                sleep(2)
+            if any([x, y, z]):
+                event = ep_robot.chassis.move(
+                    x=x, y=y, z=z, xy_speed=xy_speed, z_speed=z_speed
+                )
+                wait(event)
+                sleep(3)
+        node.send_output(f"reached_{destination.lower()}", pa.array([]))
+        current_location = destination
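Note: `LOCATION` is a small routing table: `LOCATION[current][destination]` is a list of `[x, y, z, xy_speed, z_speed, pitch, yaw]` commands replayed to drive between rooms. Adding a room means adding routes in both directions, for example (placeholder values, not calibrated for a real floor plan):

# Hypothetical extra routes; distances and angles are placeholders.
LOCATION["HOME"]["LIVING_ROOM"] = np.array([[0.5, 0.0, 0.0, 0.8, 0.0, 0.0, 90.0]])
LOCATION["LIVING_ROOM"] = {
    "HOME": np.array([[-0.5, 0.0, 0.0, 0.8, 0.0, 0.0, -90.0]]),
    "KITCHEN": np.array([[-0.5, 0.0, 0.0, 0.8, 0.0, 0.0, -90.0]]),
}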