haixuantao committed
Commit 034b730
Parent(s): none
initial commit
Files changed:
- .gitattributes +5 -0
- .gitignore +6 -0
- graphs/dataflow_basic.yml +34 -0
- graphs/dataflow_robot_vlm.yml +45 -0
- graphs/dataflow_vlm_basic.yml +28 -0
- operators/chatgpt_op.py +159 -0
- operators/keyboard_op.py +65 -0
- operators/microphone_op.py +32 -0
- operators/opencv_stream.py +32 -0
- operators/plot.py +83 -0
- operators/robot.py +111 -0
- operators/vlm_op.py +273 -0
- operators/whisper_op.py +25 -0
- tests/test_idefix2.py +154 -0
- tests/test_robomaster.py +7 -0
.gitattributes
ADDED
@@ -0,0 +1,5 @@
*.arrow filter=lfs diff=lfs merge=lfs -text
*.mkv filter=lfs diff=lfs merge=lfs -text
*.mp4 filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
graphs/out/**/*.txt filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,6 @@
graphs/yolov5n.pt
*.pt
operators/__pycache__/
__pycache__/
*.avi
*.txt
graphs/dataflow_basic.yml
ADDED
@@ -0,0 +1,34 @@
nodes:
  - id: robot
    operator:
      python: ../operators/robot.py
      inputs:
        control:
          source: keyboard/submitted
          queue_size: 1
        tick:
          source: dora/timer/millis/200
          queue_size: 1

  - id: bot_webcam
    custom:
      source: ../operators/opencv_stream.py
      outputs:
        - image

  ### Camera
  - id: plot_bot
    operator:
      python: ../operators/plot.py
      inputs:
        image: bot_webcam/image
        keyboard_buffer: keyboard/buffer
        user_message: keyboard/submitted

  - id: keyboard
    custom:
      source: ../operators/keyboard_op.py
      outputs:
        - buffer
        - submitted
graphs/dataflow_robot_vlm.yml
ADDED
@@ -0,0 +1,45 @@
nodes:
  ### Camera
  - id: plot_bot
    operator:
      python: ../operators/plot.py
      inputs:
        image: webcam/image
        assistant_message: vlm/assistant_message
        keyboard_buffer: keyboard/buffer
        user_message: keyboard/submitted

  - id: vlm
    operator:
      python: ../operators/chatgpt_op.py
      inputs:
        image:
          source: webcam/image
          queue_size: 1
        instruction: keyboard/submitted
      outputs:
        - assistant_message

  - id: robot
    operator:
      python: ../operators/robot.py
      inputs:
        tick:
          source: dora/timer/millis/2000
          queue_size: 1
        control:
          source: vlm/assistant_message
          queue_size: 1

  - id: webcam
    custom:
      source: ../operators/opencv_stream.py
      outputs:
        - image

  - id: keyboard
    custom:
      source: ../operators/keyboard_op.py
      outputs:
        - buffer
        - submitted
graphs/dataflow_vlm_basic.yml
ADDED
@@ -0,0 +1,28 @@
nodes:
  ### Camera
  - id: plot_bot
    operator:
      python: ../operators/plot.py
      inputs:
        image: webcam/image
        # keyboard_buffer: keyboard/buffer
        # user_message: keyboard/submitted
        assistant_message: vlm/assistant_message

  - id: vlm
    operator:
      python: ../operators/chatgpt_op.py
      inputs:
        image:
          source: webcam/image
          queue_size: 1
      outputs:
        - assistant_message

  - id: webcam
    operator:
      python: ../operators/webcam.py
      inputs:
        tick: dora/timer/millis/500
      outputs:
        - image
operators/chatgpt_op.py
ADDED
@@ -0,0 +1,159 @@
import base64
import os
import time
from io import BytesIO

import cv2
import pyarrow as pa
import requests

from dora import DoraStatus


def encode_numpy_image(np_image):
    # Resize the frame and encode it as a base64 PNG string
    np_image = cv2.resize(np_image, (512, 512))
    _, buffer = cv2.imencode(
        ".png", np_image
    )  # You can change '.png' to another format if needed

    # Convert the buffer to a byte stream
    byte_stream = BytesIO(buffer)

    # Encode the byte stream to base64
    base64_encoded_image = base64.b64encode(byte_stream.getvalue()).decode("utf-8")
    return base64_encoded_image


CAMERA_WIDTH = 640
CAMERA_HEIGHT = 480

API_KEY = os.getenv("OPENAI_API_KEY")


MESSAGE_SENDER_TEMPLATE = """
You control a robot. Don't get too close to objects.

{user_message}

Respond with only one of the following actions:
- FORWARD
- BACKWARD
- TURN_RIGHT
- TURN_LEFT
- NOD_YES
- NOD_NO
- STOP

Your last 5 actions were:
{actions}
"""


def understand_image(image, user_message, actions):
    # Getting the base64 string
    base64_image = encode_numpy_image(image)
    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {API_KEY}"}

    now = time.time()
    payload = {
        "model": "gpt-4-vision-preview",
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": MESSAGE_SENDER_TEMPLATE.format(
                            user_message="\n".join(user_message),
                            actions="\n".join(actions[-5:]),
                        ),
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{base64_image}",
                            "detail": "low",
                        },
                    },
                ],
            }
        ],
        "max_tokens": 50,
    }

    response = requests.post(
        "https://api.openai.com/v1/chat/completions", headers=headers, json=payload
    )

    print("resp:", time.time() - now)
    return response.json()["choices"][0]["message"]["content"]


class Operator:
    def __init__(self):
        self.actions = []
        self.instruction = []

    def on_event(
        self,
        dora_event,
        send_output,
    ) -> DoraStatus:
        if dora_event["type"] == "INPUT":
            if dora_event["id"] == "image":
                image = (
                    dora_event["value"]
                    .to_numpy()
                    .reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
                    .copy()
                )
                output = understand_image(image, self.instruction, self.actions)
                self.actions.append(output)
                print("response: ", output, flush=True)

                send_output(
                    "assistant_message",
                    pa.array([f"{output}"]),
                    dora_event["metadata"],
                )
            elif dora_event["id"] == "instruction":
                self.instruction.append(dora_event["value"][0].as_py())
                print("instructions: ", self.instruction, flush=True)
        return DoraStatus.CONTINUE


if __name__ == "__main__":
    op = Operator()

    # Path to the current file
    current_file_path = __file__

    # Directory of the current file
    current_directory = os.path.dirname(current_file_path)

    path = current_directory + "/test_image.jpg"

    op.on_event(
        {
            "type": "INPUT",
            "id": "code_modifier",
            "value": pa.array(
                [
                    {
                        "path": path,
                        "user_message": "change planning to make gimbal follow bounding box ",
                    },
                ]
            ),
            "metadata": [],
        },
        print,
    )
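
A minimal offline sketch (not part of this commit) of how the operator above can be exercised without a running dataflow. It assumes the file is importable as chatgpt_op, that OPENAI_API_KEY is set, and that a synthetic all-black frame is acceptable; the flattened uint8 frame mimics what the graphs send.

import numpy as np
import pyarrow as pa

from chatgpt_op import CAMERA_HEIGHT, CAMERA_WIDTH, Operator

op = Operator()

# Queue a user instruction first; this branch only stores it.
op.on_event(
    {
        "type": "INPUT",
        "id": "instruction",
        "value": pa.array(["explore the room"]),
        "metadata": {},
    },
    lambda *_: None,
)

# Feed one flattened BGR frame (CAMERA_HEIGHT * CAMERA_WIDTH * 3 uint8 values);
# this triggers the GPT-4 Vision call and emits an assistant_message.
frame = np.zeros((CAMERA_HEIGHT, CAMERA_WIDTH, 3), dtype=np.uint8)
op.on_event(
    {
        "type": "INPUT",
        "id": "image",
        "value": pa.array(frame.ravel()),
        "metadata": {},
    },
    lambda name, data, metadata: print(name, data[0].as_py()),
)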
operators/keyboard_op.py
ADDED
@@ -0,0 +1,65 @@
from pynput import keyboard
from pynput.keyboard import Key, Events
import pyarrow as pa
from dora import Node


node = Node()
buffer_text = ""
ctrl = False
submitted_text = []
cursor = 0

NODE_TOPIC = ["record", "send", "ask", "change"]

with keyboard.Events() as events:
    while True:
        dora_event = node.next(0.01)
        if (
            dora_event is not None
            and dora_event["type"] == "INPUT"
            and dora_event["id"] == "recording"
        ):
            buffer_text += dora_event["value"][0].as_py()
            node.send_output("buffer", pa.array([buffer_text]))
            continue

        event = events.get(1.0)
        if event is not None and isinstance(event, Events.Press):
            if hasattr(event.key, "char"):
                cursor = 0
                buffer_text += event.key.char
                node.send_output("buffer", pa.array([buffer_text]))
            else:
                if event.key == Key.backspace:
                    buffer_text = buffer_text[:-1]
                    node.send_output("buffer", pa.array([buffer_text]))
                elif event.key == Key.esc:
                    buffer_text = ""
                    node.send_output("buffer", pa.array([buffer_text]))
                elif event.key == Key.enter:
                    node.send_output("submitted", pa.array([buffer_text]))
                    first_word = buffer_text.split(" ")[0]
                    if first_word in NODE_TOPIC:
                        node.send_output(first_word, pa.array([buffer_text]))
                    submitted_text.append(buffer_text)
                    buffer_text = ""
                    node.send_output("buffer", pa.array([buffer_text]))
                elif event.key == Key.ctrl:
                    ctrl = True
                elif event.key == Key.space:
                    buffer_text += " "
                    node.send_output("buffer", pa.array([buffer_text]))
                elif event.key == Key.up:
                    if len(submitted_text) > 0:
                        cursor = max(cursor - 1, -len(submitted_text))
                        buffer_text = submitted_text[cursor]
                        node.send_output("buffer", pa.array([buffer_text]))
                elif event.key == Key.down:
                    if len(submitted_text) > 0:
                        cursor = min(cursor + 1, 0)
                        buffer_text = submitted_text[cursor]
                        node.send_output("buffer", pa.array([buffer_text]))
        elif event is not None and isinstance(event, Events.Release):
            if event.key == Key.ctrl:
                ctrl = False
operators/microphone_op.py
ADDED
@@ -0,0 +1,32 @@
import numpy as np
import pyarrow as pa
import sounddevice as sd

from dora import DoraStatus

SAMPLE_RATE = 16000
MAX_DURATION = 5


class Operator:
    """
    Microphone operator that records the audio
    """

    def on_event(
        self,
        dora_event,
        send_output,
    ) -> DoraStatus:
        if dora_event["type"] == "INPUT":
            audio_data = sd.rec(
                int(SAMPLE_RATE * MAX_DURATION),
                samplerate=SAMPLE_RATE,
                channels=1,
                dtype=np.int16,
                blocking=True,
            )

            audio_data = audio_data.ravel().astype(np.float32) / 32768.0
            send_output("audio", pa.array(audio_data), dora_event["metadata"])
        return DoraStatus.CONTINUE
operators/opencv_stream.py
ADDED
@@ -0,0 +1,32 @@
import cv2
import pyarrow as pa
from dora import Node

node = Node()
# TCP stream URL (replace with your stream URL)
TCP_STREAM_URL = "tcp://192.168.2.1:40921"
# Global variables, change them to fit your needs

CAMERA_WIDTH = 640
CAMERA_HEIGHT = 480

# Create a VideoCapture object using the TCP stream URL
cap = cv2.VideoCapture(TCP_STREAM_URL)

# Check if the VideoCapture object opened successfully
assert cap.isOpened(), "Error: Could not open video capture."

while True:
    # Read a frame from the stream
    ret, frame = cap.read()

    if not ret:
        break  # Break the loop when no more frames are available
    frame = cv2.resize(frame, (CAMERA_WIDTH, CAMERA_HEIGHT))

    node.send_output("image", pa.array(frame.ravel()))


# Release the VideoCapture object and any OpenCV windows
cap.release()
cv2.destroyAllWindows()
operators/plot.py
ADDED
@@ -0,0 +1,83 @@
import cv2


from dora import DoraStatus


CAMERA_WIDTH = 640
CAMERA_HEIGHT = 480

FONT = cv2.FONT_HERSHEY_SIMPLEX

writer = cv2.VideoWriter(
    "output01.avi",
    cv2.VideoWriter_fourcc(*"MJPG"),
    30,
    (CAMERA_WIDTH, CAMERA_HEIGHT),
)


class Operator:
    """
    Plot image and bounding box
    """

    def __init__(self):
        self.bboxs = []
        self.buffer = ""
        self.submitted = []
        self.lines = []

    def on_event(
        self,
        dora_event,
        send_output,
    ):
        if dora_event["type"] == "INPUT":
            id = dora_event["id"]
            value = dora_event["value"]
            if id == "image":

                image = (
                    value.to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3)).copy()
                )

                cv2.putText(
                    image, self.buffer, (20, 14 + 15 * 25), FONT, 0.8, (190, 250, 0), 2
                )

                i = 0
                for text in self.submitted[::-1]:
                    color = (
                        (0, 255, 190)
                        if text["role"] == "user_message"
                        else (0, 190, 255)
                    )
                    cv2.putText(
                        image,
                        text["content"],
                        (
                            20,
                            14 + (13 - i) * 25,
                        ),
                        FONT,
                        0.8,
                        color,
                        2,
                    )
                    i += 1
                writer.write(image)
                cv2.imshow("frame", image)
                if cv2.waitKey(1) & 0xFF == ord("q"):
                    return DoraStatus.STOP
            elif id == "keyboard_buffer":
                self.buffer = value[0].as_py()
            elif "message" in id:
                self.submitted += [
                    {
                        "role": id,
                        "content": value[0].as_py(),
                    }
                ]

        return DoraStatus.CONTINUE
operators/robot.py
ADDED
@@ -0,0 +1,111 @@
from robomaster import robot
from typing import Callable, Optional, Union
from enum import Enum
from dora import DoraStatus

import pyarrow as pa


CONN = "ap"


class Command(Enum):
    NOD_YES = [
        {"action": "gimbal", "value": [20.0, 0.0]},
        {"action": "gimbal", "value": [0.0, 0.0]},
    ]
    NOD_NO = [
        {"action": "gimbal", "value": [0.0, -20.0]},
        {"action": "gimbal", "value": [0.0, 20.0]},
        {"action": "gimbal", "value": [0.0, 0.0]},
    ]
    FORWARD = [
        {
            "action": "control",
            "value": [0.5, 0.0, 0.0, 0.6, 0],
        }
    ]
    BACKWARD = [
        {
            "action": "control",
            "value": [-0.5, 0.0, 0.0, 0.6, 0],
        }
    ]
    TURN_LEFT = [
        {"action": "gimbal", "value": [0.0, -45.0]},
        {
            "action": "control",
            "value": [0.0, 0.0, 45.0, 0.0, 50],
        },
    ]
    TURN_RIGHT = [
        {"action": "gimbal", "value": [0.0, 45.0]},
        {
            "value": [0.0, 0.0, -45.0, 0.0, 50],
            "action": "control",
        },
    ]
    UNKNOWN = [
        {
            "value": [0.0, 0.0, 0.0, 0.0, 0],
            "action": "control",
        }
    ]
    # STOP = [0, 0, 0, 0]
    # COMPLETED = [0, 0, 0, 0]

    @classmethod
    def parse(cls, value):
        for k, v in cls.__members__.items():
            if k == value:
                return v
        return cls.UNKNOWN


class Operator:
    def __init__(self):
        self.ep_robot = robot.Robot()
        print("Initializing robot...")
        assert self.ep_robot.initialize(conn_type=CONN), "Could not initialize ep_robot"
        assert self.ep_robot.camera.start_video_stream(
            display=False
        ), "Could not start video stream"

        self.ep_robot.gimbal.recenter().wait_for_completed()
        self.backlog = []
        self.event = None

    def on_event(
        self,
        dora_event: str,
        send_output: Callable[[str, Union[bytes, pa.UInt8Array], Optional[dict]], None],
    ) -> DoraStatus:
        event_type = dora_event["type"]
        if event_type == "INPUT":
            if not (
                self.event is not None
                and not (self.event._event.isSet() and self.event.is_completed)
            ):
                if dora_event["id"] == "tick":
                    if len(self.backlog) > 0:
                        command = self.backlog.pop(0)
                        print(command, flush=True)
                        if command["action"] == "control":
                            [x, y, z, xy_speed, z_speed] = command["value"]
                            print(command, flush=True)
                            self.event = self.ep_robot.chassis.move(
                                x=x, y=y, z=z, xy_speed=xy_speed, z_speed=z_speed
                            )
                        elif command["action"] == "gimbal":
                            [pitch, yaw] = command["value"]
                            print(command, flush=True)
                            self.event = self.ep_robot.gimbal.moveto(
                                pitch=pitch, yaw=yaw, pitch_speed=0.0, yaw_speed=50.0
                            )
                elif dora_event["id"] == "control":
                    raw_command = dora_event["value"][0].as_py()
                    print(raw_command, flush=True)
                    cmd = Command.parse(raw_command)
                    self.backlog += cmd.value

        return DoraStatus.CONTINUE
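
A small hypothetical sketch (not part of this commit) of how the Command enum above turns the VLM's one-word reply into queued actions. It assumes robot.py is importable as robot from the operators/ directory, which requires the robomaster package to be installed, though no physical robot is needed just to parse a command.

from robot import Command

# "TURN_LEFT" is one of the actions listed in the chatgpt_op.py prompt.
cmd = Command.parse("TURN_LEFT")
print(cmd.value)  # a gimbal move followed by a chassis rotation

# Any reply that is not a known action name falls back to UNKNOWN,
# which maps to a zero-velocity control command.
print(Command.parse("JUMP").value)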
operators/vlm_op.py
ADDED
@@ -0,0 +1,273 @@
from dora import DoraStatus
import pylcs
import os
import base64
import json
import re
import time

import pyarrow as pa
import requests
import torch

from io import BytesIO
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor, AutoTokenizer

from transformers.image_utils import (
    to_numpy_array,
    PILImageResampling,
    ChannelDimension,
)
from transformers.image_transforms import resize, to_channel_dimension_format

API_TOKEN = os.getenv("HF_TOKEN")

DEVICE = torch.device("cuda")
PROCESSOR = AutoProcessor.from_pretrained(
    "HuggingFaceM4/tr_272_bis_opt_step_15000_merge",
    token=API_TOKEN,
)
MODEL = AutoModelForCausalLM.from_pretrained(
    "HuggingFaceM4/tr_272_bis_opt_step_15000_merge",
    token=API_TOKEN,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
).to(DEVICE)
image_seq_len = MODEL.config.perceiver_config.resampler_n_latents
BOS_TOKEN = PROCESSOR.tokenizer.bos_token
BAD_WORDS_IDS = PROCESSOR.tokenizer(
    ["<image>", "<fake_token_around_image>"], add_special_tokens=False
).input_ids


CHATGPT = True
MODEL_NAME_OR_PATH = "TheBloke/deepseek-coder-6.7B-instruct-GPTQ"

MESSAGE_SENDER_TEMPLATE = """
### Instruction
You're a json expert. Format your response as a json with a topic and a data field in a ```json block. No explanation needed. No code needed.
The schema for those json are:
- forward
- backward
- left
- right

The response should look like this:
```json

[
  {{ "topic": "control", "data": "forward" }},
]
```

{user_message}

### Response:
"""

model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME_OR_PATH,
    device_map="auto",
    trust_remote_code=True,
    revision="main",
)


tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME_OR_PATH, use_fast=True)


def extract_json_code_blocks(text):
    """
    Extracts json code blocks from the given text that are enclosed in triple backticks with a json language identifier.

    Parameters:
    - text: A string that may contain one or more json code blocks.

    Returns:
    - A list of strings, where each string is a block of json code extracted from the text.
    """
    pattern = r"```json\n(.*?)\n```"
    matches = re.findall(pattern, text, re.DOTALL)
    if len(matches) == 0:
        pattern = r"```json\n(.*?)(?:\n```|$)"
        matches = re.findall(pattern, text, re.DOTALL)
        if len(matches) == 0:
            return [text]

    return matches


# Function to encode the image
def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")


def understand_image(image_path):
    # Getting the base64 string
    base64_image = encode_image(image_path)

    # NOTE: assumes the OpenAI key is exposed as OPENAI_API_KEY (as in chatgpt_op.py)
    api_key = os.getenv("OPENAI_API_KEY")
    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}

    payload = {
        "model": "gpt-4-vision-preview",
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": "What’s in this image? Describe it in a short sentence",
                    },
                    {
                        "type": "image_url",
                        "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
                    },
                ],
            }
        ],
        "max_tokens": 300,
    }

    response = requests.post(
        "https://api.openai.com/v1/chat/completions", headers=headers, json=payload
    )

    print(response.json()["choices"][0]["message"]["content"])


class Operator:

    def on_event(
        self,
        dora_event,
        send_output,
    ) -> DoraStatus:
        if dora_event["type"] == "INPUT" and dora_event["id"] == "message_sender":
            user_message = dora_event["value"][0].as_py()
            output = self.ask_llm(
                MESSAGE_SENDER_TEMPLATE.format(user_message=user_message)
            )
            outputs = extract_json_code_blocks(output)[0]
            print("response: ", output, flush=True)
            try:
                outputs = json.loads(outputs)
                if not isinstance(outputs, list):
                    outputs = [outputs]
                for output in outputs:
                    if not isinstance(output["data"], list):
                        output["data"] = [output["data"]]

                    if output["topic"] in ["led", "blaster"]:
                        send_output(
                            output["topic"],
                            pa.array(output["data"]),
                            dora_event["metadata"],
                        )

                        send_output(
                            "assistant_message",
                            pa.array([f"sent: {output}"]),
                            dora_event["metadata"],
                        )
                    else:
                        send_output(
                            "assistant_message",
                            pa.array(
                                [f"Could not send as topic was not available: {output}"]
                            ),
                            dora_event["metadata"],
                        )
            except:
                send_output(
                    "assistant_message",
                    pa.array([f"Could not parse json: {outputs}"]),
                    dora_event["metadata"],
                )
            # if data is not iterable, put data in a list
        return DoraStatus.CONTINUE

    def ask_llm(self, prompt):

        # Generate output
        # prompt = PROMPT_TEMPLATE.format(system_message=system_message, prompt=prompt))
        input = tokenizer(prompt, return_tensors="pt")
        input_ids = input.input_ids.cuda()

        # add attention mask here
        attention_mask = input["attention_mask"]

        output = model.generate(
            inputs=input_ids,
            temperature=0.7,
            do_sample=True,
            top_p=0.95,
            top_k=40,
            max_new_tokens=512,
            attention_mask=attention_mask,
            eos_token_id=tokenizer.eos_token_id,
        )
        # Get the tokens from the output, decode them, print them

        # Get text between im_start and im_end
        return tokenizer.decode(output[0], skip_special_tokens=True)[len(prompt) :]

    def ask_chatgpt(self, prompt):
        from openai import OpenAI

        client = OpenAI()
        print("---asking chatgpt: ", prompt, flush=True)
        response = client.chat.completions.create(
            model="gpt-4-turbo-preview",
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": prompt},
            ],
        )
        answer = response.choices[0].message.content

        print("Done", flush=True)
        return answer


if __name__ == "__main__":
    op = Operator()

    # Path to the current file
    current_file_path = __file__

    # Directory of the current file
    current_directory = os.path.dirname(current_file_path)

    path = current_directory + "/planning_op.py"
    with open(path, "r", encoding="utf8") as f:
        raw = f.read()

    op.on_event(
        {
            "type": "INPUT",
            "id": "code_modifier",
            "value": pa.array(
                [
                    {
                        "path": path,
                        "user_message": "change planning to make gimbal follow bounding box ",
                    },
                ]
            ),
            "metadata": [],
        },
        print,
    )
operators/whisper_op.py
ADDED
@@ -0,0 +1,25 @@
import pyarrow as pa
import whisper

from dora import DoraStatus


model = whisper.load_model("base")


class Operator:
    """
    Transforming Speech to Text using OpenAI Whisper model
    """

    def on_event(
        self,
        dora_event,
        send_output,
    ) -> DoraStatus:
        if dora_event["type"] == "INPUT":
            audio = dora_event["value"].to_numpy()
            audio = whisper.pad_or_trim(audio)
            result = model.transcribe(audio, language="en")
            send_output("text", pa.array([result["text"]]), dora_event["metadata"])
        return DoraStatus.CONTINUE
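
A minimal offline sketch (not part of this commit) pairing this operator with microphone_op.py: the microphone emits 16 kHz float32 samples scaled to [-1, 1], which is the format Whisper's transcribe expects. It assumes the file is importable as whisper_op and that the base Whisper weights can be downloaded.

import numpy as np
import pyarrow as pa

from whisper_op import Operator

op = Operator()

# Five seconds of silence in the same format microphone_op.py produces.
silence = np.zeros(16000 * 5, dtype=np.float32)
op.on_event(
    {"type": "INPUT", "value": pa.array(silence), "metadata": {}},
    lambda name, data, metadata: print(name, repr(data[0].as_py())),
)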
tests/test_idefix2.py
ADDED
@@ -0,0 +1,154 @@
import os
import torch
import requests

from io import BytesIO
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

from transformers.image_utils import (
    to_numpy_array,
    PILImageResampling,
    ChannelDimension,
)
from transformers.image_transforms import resize, to_channel_dimension_format


API_TOKEN = os.getenv("HF_TOKEN")

DEVICE = torch.device("cuda")
PROCESSOR = AutoProcessor.from_pretrained(
    "HuggingFaceM4/tr_272_bis_opt_step_15000_merge",
    token=API_TOKEN,
)
MODEL = AutoModelForCausalLM.from_pretrained(
    "HuggingFaceM4/tr_272_bis_opt_step_15000_merge",
    token=API_TOKEN,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
).to(DEVICE)
image_seq_len = MODEL.config.perceiver_config.resampler_n_latents
BOS_TOKEN = PROCESSOR.tokenizer.bos_token
BAD_WORDS_IDS = PROCESSOR.tokenizer(
    ["<image>", "<fake_token_around_image>"], add_special_tokens=False
).input_ids


def convert_to_rgb(image):
    # `image.convert("RGB")` would only work for .jpg images, as it creates a wrong background
    # for transparent images. The call to `alpha_composite` handles this case
    if image.mode == "RGB":
        return image

    image_rgba = image.convert("RGBA")
    background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
    alpha_composite = Image.alpha_composite(background, image_rgba)
    alpha_composite = alpha_composite.convert("RGB")
    return alpha_composite


# The processor is the same as the Idefics processor except for the BILINEAR interpolation,
# so this is a hack in order to redefine ONLY the transform method
def custom_transform(x):
    x = convert_to_rgb(x)
    x = to_numpy_array(x)

    height, width = x.shape[:2]
    aspect_ratio = width / height
    if width >= height and width > 980:
        width = 980
        height = int(width / aspect_ratio)
    elif height > width and height > 980:
        height = 980
        width = int(height * aspect_ratio)
    width = max(width, 378)
    height = max(height, 378)

    x = resize(x, (height, width), resample=PILImageResampling.BILINEAR)
    x = PROCESSOR.image_processor.rescale(x, scale=1 / 255)
    x = PROCESSOR.image_processor.normalize(
        x,
        mean=PROCESSOR.image_processor.image_mean,
        std=PROCESSOR.image_processor.image_std,
    )
    x = to_channel_dimension_format(x, ChannelDimension.FIRST)
    x = torch.tensor(x)
    return x


def download_image(url):
    try:
        # Send a GET request to the URL to download the image
        response = requests.get(url)
        # Check if the request was successful (status code 200)
        if response.status_code == 200:
            # Open the image using PIL
            image = Image.open(BytesIO(response.content))
            # Return the PIL image object
            return image
        else:
            print(f"Failed to download image. Status code: {response.status_code}")
            return None
    except Exception as e:
        print(f"An error occurred: {e}")
        return None


# Create text token inputs
image_seq = "<image>" * image_seq_len

instruction = "What is this?"
# Create pixel inputs
image = download_image(
    "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
)


def ask_vlm(instruction, image):

    inputs = PROCESSOR.tokenizer(
        [
            f"{BOS_TOKEN}<fake_token_around_image>{image_seq}<fake_token_around_image>{instruction}",
        ],
        return_tensors="pt",
        add_special_tokens=False,
        padding=True,
    )

    raw_images = [
        [image],
    ]
    output_images = [
        [PROCESSOR.image_processor(img, transform=custom_transform) for img in img_list]
        for img_list in raw_images
    ]
    total_batch_size = len(output_images)
    max_num_images = max([len(img_l) for img_l in output_images])
    max_height = max([i.size(2) for img_l in output_images for i in img_l])
    max_width = max([i.size(3) for img_l in output_images for i in img_l])
    padded_image_tensor = torch.zeros(
        total_batch_size, max_num_images, 3, max_height, max_width
    )
    padded_pixel_attention_masks = torch.zeros(
        total_batch_size, max_num_images, max_height, max_width, dtype=torch.bool
    )
    for batch_idx, img_l in enumerate(output_images):
        for img_idx, img in enumerate(img_l):
            im_height, im_width = img.size()[2:]
            padded_image_tensor[batch_idx, img_idx, :, :im_height, :im_width] = img
            padded_pixel_attention_masks[batch_idx, img_idx, :im_height, :im_width] = (
                True
            )

    inputs["pixel_values"] = padded_image_tensor
    inputs["pixel_attention_mask"] = padded_pixel_attention_masks
    inputs = {k: v.to(DEVICE) for k, v in inputs.items()}

    generated_ids = MODEL.generate(
        **inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=10
    )
    generated_texts = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)
    return generated_texts


print(ask_vlm(instruction, image))
tests/test_robomaster.py
ADDED
@@ -0,0 +1,7 @@
from robomaster import robot, blaster, led

CONN = "ap"
ep_robot = robot.Robot()
print("Initializing robot...")
assert ep_robot.initialize(conn_type=CONN), "Could not initialize ep_robot"
event = ep_robot.chassis.move(x=0, y=0, z=-90.0, xy_speed=0, z_speed=50)