Upload 15 files
- .gitattributes +36 -35
- .gitignore +0 -0
- Dockerfile.yaml +27 -0
- Prompt_One_shot.txt +41 -0
- Prompt_Two_Shot.txt +87 -0
- README.md +111 -10
- app.py +321 -0
- bt_generator.py +121 -0
- parser.py +285 -0
- requirements.txt +15 -0
- safety_module.py +81 -0
- simulator_env.py +453 -0
- speech_processing.py +40 -0
- text_processing.py +40 -0
- tree.xml +0 -0
.gitattributes
CHANGED
@@ -1,35 +1,36 @@
 *.7z filter=lfs diff=lfs merge=lfs -text
 *.arrow filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
 *.bz2 filter=lfs diff=lfs merge=lfs -text
 *.ckpt filter=lfs diff=lfs merge=lfs -text
 *.ftz filter=lfs diff=lfs merge=lfs -text
 *.gz filter=lfs diff=lfs merge=lfs -text
 *.h5 filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
 *.lfs.* filter=lfs diff=lfs merge=lfs -text
 *.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
 *.npy filter=lfs diff=lfs merge=lfs -text
 *.npz filter=lfs diff=lfs merge=lfs -text
 *.onnx filter=lfs diff=lfs merge=lfs -text
 *.ot filter=lfs diff=lfs merge=lfs -text
 *.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
 *.pickle filter=lfs diff=lfs merge=lfs -text
 *.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
 *.rar filter=lfs diff=lfs merge=lfs -text
 *.safetensors filter=lfs diff=lfs merge=lfs -text
 saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
 *.tar filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
 *.tgz filter=lfs diff=lfs merge=lfs -text
 *.wasm filter=lfs diff=lfs merge=lfs -text
 *.xz filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.gguf filter=lfs diff=lfs merge=lfs -text

.gitignore
ADDED
Binary file (40 Bytes).

Dockerfile.yaml
ADDED
@@ -0,0 +1,27 @@
# Use a slim Python base image
FROM python:3.10-slim

# Create a non-root user (UID 1000) to match Spaces runtime
RUN useradd -m -u 1000 user

# Switch to the non-root user
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

# Set the working directory to the user's home
WORKDIR /home/user/app

# Copy and install Python dependencies
COPY --chown=user requirements.txt ./
RUN pip install --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application code
COPY --chown=user . ./

# Expose the port defined in README.md (app_port: 7860)
EXPOSE 7860

# Launch the Gradio app on 0.0.0.0 so it's reachable externally
CMD ["python", "app.py", "--server_port", "7860", "--server_name", "0.0.0.0"]

Prompt_One_shot.txt
ADDED
@@ -0,0 +1,41 @@
<s>
<<SYS>>You are a helpful, respectful, and honest AI assistant. Your task is to generate well-structured XML code for behavior trees based on the provided instructions.<</SYS>>
INSTRUCTIONS: It is CRITICAL to use only the following behaviors structured as a dictionary: {
is_battery_low: Condition node: Check if the battery level is low. Returns SUCCESS if low, FAILURE otherwise.;
charge_battery: Action Node: Initiate battery charging. Always returns SUCCESS.;
patrol_area: Action Node: Patrol the designated area. Always returns SUCCESS.;
report_status: Action Node: Send a status report to the base. Always returns SUCCESS.;
return_to_patrol: Action Node: Return to patrol route. Always returns SUCCESS.
is_intruder_detected: Condition node: Check if an intruder is detected. Returns SUCCESS if detected, FAILURE otherwise.;
sound_alarm: Action Node: Activate the alarm system. Always returns SUCCESS.;
notify_security: Action Node: Send notification to security team. Always returns SUCCESS.;
perform_scan: Action Node: Perform a security scan of the area. Always returns SUCCESS.;
continue_patrol: Action Node: Continue patrolling the perimeter. Always returns SUCCESS.}.
To construct a behavior tree in XML format for the following command. If any behavior outside the provided dictionary is used, output (Sorry, I can't do the task).
USER COMMAND: generate behavior tree to "if battery is low, charge the battery; otherwise, patrol the area and report status". Take a step back and think deeply about the behavior you need for this command. Then think of the XML structure and the behavior you used.
RESPONSE:
<?xml version="1.0" encoding="UTF-8"?>
<root BTCPP_format="3" main_tree_to_execute="BatteryManagement">
  <BehaviorTree ID="BatteryManagement">
    <Fallback name="High-Level Decision">
      <Sequence name="Low Battery Sequence">
        <is_battery_low/>
        <charge_battery/>
      </Sequence>
      <Sequence name="Normal Operation Sequence">
        <patrol_area/>
        <report_status/>
      </Sequence>
    </Fallback>
  </BehaviorTree>
  <TreeNodesModel>
    <Condition ID="is_battery_low"/>
    <Action ID="charge_battery"/>
    <Action ID="patrol_area"/>
    <Action ID="report_status"/>
    <Action ID="change_color" editable="true">
      <input_port name="color"/>
    </Action>
  </TreeNodesModel>
</root>
</s>

Prompt_Two_Shot.txt
ADDED
@@ -0,0 +1,87 @@
<s>
<<SYS>>You are a helpful, respectful, and honest AI assistant. Your task is to generate well-structured XML code for behavior trees based on the provided instructions.<</SYS>>
INSTRUCTIONS: It is CRITICAL to use only the following behaviors structured as a dictionary: {
is_battery_low: Condition node: Check if the battery level is low. Returns SUCCESS if low, FAILURE otherwise.;
charge_battery: Action Node: Initiate battery charging. Always returns SUCCESS.;
patrol_area: Action Node: Patrol the designated area. Always returns SUCCESS.;
report_status: Action Node: Send a status report to the base. Always returns SUCCESS.;
return_to_patrol: Action Node: Return to patrol route. Always returns SUCCESS.
is_intruder_detected: Condition node: Check if an intruder is detected. Returns SUCCESS if detected, FAILURE otherwise.;
sound_alarm: Action Node: Activate the alarm system. Always returns SUCCESS.;
notify_security: Action Node: Send notification to security team. Always returns SUCCESS.;
perform_scan: Action Node: Perform a security scan of the area. Always returns SUCCESS.;
continue_patrol: Action Node: Continue patrolling the perimeter. Always returns SUCCESS.}.
To construct a behavior tree in XML format for the following command. If any behavior outside the provided dictionary is used, output (Sorry, I can't do the task).
USER COMMAND: generate behavior tree to "if battery is low, charge the battery; otherwise, patrol the area and report status". Take a step back and think deeply about the behavior you need for this command. Then think of the XML structure and the behavior you used.
RESPONSE:
<?xml version="1.0" encoding="UTF-8"?>
<root BTCPP_format="3" main_tree_to_execute="BatteryManagement">
  <BehaviorTree ID="BatteryManagement">
    <Fallback name="High-Level Decision">
      <Sequence name="Low Battery Sequence">
        <is_battery_low/>
        <charge_battery/>
      </Sequence>
      <Sequence name="Normal Operation Sequence">
        <patrol_area/>
        <report_status/>
      </Sequence>
    </Fallback>
  </BehaviorTree>
  <TreeNodesModel>
    <Condition ID="is_battery_low"/>
    <Action ID="charge_battery"/>
    <Action ID="patrol_area"/>
    <Action ID="report_status"/>
    <Action ID="change_color" editable="true">
      <input_port name="color"/>
    </Action>
  </TreeNodesModel>
</root>
</s>


<s>
<<SYS>>You are a helpful, respectful, and honest AI assistant. Your task is to generate well-structured XML code for behavior trees based on the provided instructions.<</SYS>>
INSTRUCTIONS: It is CRITICAL to use only the following behaviors structured as a dictionary: {
is_battery_low: Condition node: Check if the battery level is low. Returns SUCCESS if low, FAILURE otherwise.;
charge_battery: Action Node: Initiate battery charging. Always returns SUCCESS.;
patrol_area: Action Node: Patrol the designated area. Always returns SUCCESS.;
report_status: Action Node: Send a status report to the base. Always returns SUCCESS.;
return_to_patrol: Action Node: Return to patrol route. Always returns SUCCESS.
is_intruder_detected: Condition node: Check if an intruder is detected. Returns SUCCESS if detected, FAILURE otherwise.;
sound_alarm: Action Node: Activate the alarm system. Always returns SUCCESS.;
notify_security: Action Node: Send notification to security team. Always returns SUCCESS.;
perform_scan: Action Node: Perform a security scan of the area. Always returns SUCCESS.;
continue_patrol: Action Node: Continue patrolling the perimeter. Always returns SUCCESS.}. To construct a behavior tree in XML format for the following command. If any behavior outside the provided dictionary is used, output (Sorry, I can't do the task).
USER COMMAND: generate behavior tree to "if an intruder is detected, sound the alarm and notify security; otherwise, perform a scan and continue patrol". Take a step back and think deeply about the behavior you need for this command. Then think of the XML structure and the behavior you used."
RESPONSE:
<?xml version="1.0" encoding="UTF-8"?>
<root BTCPP_format="3" main_tree_to_execute="SecurityResponse">
  <BehaviorTree ID="SecurityResponse">
    <Sequence name="Intrusion Handling">
      <is_intruder_detected/>
      <Fallback name="Alarm and Notification">
        <sound_alarm/>
        <notify_security/>
      </Fallback>
    </Sequence>
    <Fallback name="Normal Operations">
      <Sequence>
        <perform_scan/>
        <continue_patrol/>
      </Sequence>
    </Fallback>
  </BehaviorTree>
  <TreeNodesModel>
    <Condition ID="is_intruder_detected"/>
    <Action ID="sound_alarm"/>
    <Action ID="notify_security"/>
    <Action ID="perform_scan"/>
    <Action ID="continue_patrol"/>
    <Action ID="change_color" editable="true">
      <input_port name="color"/>
    </Action>
  </TreeNodesModel>
</root>
</s>

README.md
CHANGED
@@ -1,10 +1,111 @@
(The previous 10-line README is replaced; its content is not shown in this view.)

# SwarmChat: Unified Audio, Text, and Simulation Environment for Human-Swarm Interaction

SwarmChat is an innovative project that enables intuitive communication with swarm robotics through natural language. This system integrates advanced audio transcription, text processing, and safety mechanisms with a live simulation environment that visualizes a swarm of agents executing behavior trees.

## Features

- **Audio Input Processing**:

  - Record commands via a microphone.
  - Translate speech into English using the `facebook/seamless-m4t-v2-large` model.
  - Perform a safety check on the translated text before execution.

- **Text Input Processing**:

  - Enter text commands for swarm control.
  - Translate text using EuroLLM (EuroLLM-9B-Instruct-Q4_K_M.gguf).
  - Detect unsafe or inappropriate content with an integrated safety module.

- **Safety Module**:

  - Utilizes a fine-tuned LLaMA-based model (llama-guard-3-8b-q4_k_m.gguf) for safety classification.
  - Identifies unsafe content across predefined categories (e.g., violent crimes, privacy violations, hate speech).
  - Ensures commands comply with safety standards.

- **Swarm Simulation**:

  - Visualize a swarm of agents in a live simulation powered by the Violet simulator and Pygame.
  - Agents are controlled by behavior trees defined in an XML file (`tree.xml`), using the `py_trees` framework.
  - Real-time simulation updates streamed via a Gradio web interface.

- **Behavior Tree Generator**:

  - Leverages a DeepSeek (Llama-based) model to dynamically generate behavior trees in XML format.
  - Automatically extracts available behaviors from the SwarmAgent class and constructs a detailed prompt using a predefined XML template.
  - Generates and saves new behavior tree configurations (updating `tree.xml`) based on user-specified tasks.

- **Integrated Interface**:
  - A unified Gradio web interface for both audio and text inputs.
  - Live streaming of the simulation environment.
  - Seamless switching between different input modalities.

## Technology Stack

- **Backend**:

  - Python
  - [Transformers](https://huggingface.co/transformers/) (Hugging Face)
  - PyTorch
  - Pygame
  - Threading and Queue modules for simulation management

- **Frontend**:

  - [Gradio](https://gradio.app/) for an interactive web-based interface.

- **AI Models**:

  - **Speech Processing**: `facebook/seamless-m4t-v2-large` for audio transcription and translation.
  - **Text Processing**: EuroLLM (EuroLLM-9B-Instruct-Q4_K_M.gguf) for text translation.
  - **Safety Classification**: LLaMA Guard (llama-guard-3-8b-q4_k_m.gguf) for content safety assessment.
  - **Behavior Tree Generation**: DeepSeek (using the Llama-based model DeepSeek-R1-Distill-Qwen-7B-Q4_K_M.gguf) for creating and updating behavior tree configurations.

- **Behavior Trees**:
  - Agents utilize behavior trees, parsed from XML and built with `py_trees`, to dictate their actions within the simulation.

## Installation

1. **Clone the repository**:

   ```bash
   git clone https://github.com/Inventors-Hub/SwarmChat.git
   cd SwarmChat
   ```

2. **Install dependencies**:
   ```bash
   pip install -r requirements.txt
   ```
3. **Setup AI Models**:

   - Place the EuroLLM model file (`EuroLLM-9B-Instruct-Q4_K_M.gguf`) at the specified path in `text_processing.py`.
   - Place the LLaMA Guard model file (`llama-guard-3-8b-q4_k_m.gguf`) at the specified path in `safety_module.py`.
   - Place the DeepSeek model file (`DeepSeek-R1-Distill-Qwen-7B-Q4_K_M.gguf`) at the specified path in `bt_generator.py`.

4. **Run the Application**:
   ```bash
   python app.py
   ```
5. **Access the Interface**:

   Open your browser and navigate to http://127.0.0.1:7860 to start using SwarmChat.

## Overview of Modules

- **app.py**
  The main application integrates audio/text processing, behavior tree generation, and the live simulation. It sets up the Gradio interface, handles simulation streaming, and routes user inputs to the appropriate processing modules.

- **speech_processing.py**
  Implements audio transcription and translation using the `facebook/seamless-m4t-v2-large` model.

- **text_processing.py**
  Translates text commands using EuroLLM (EuroLLM-9B-Instruct-Q4_K_M.gguf).

- **safety_module.py**
  Utilizes LLaMA Guard to assess the safety of incoming commands, ensuring compliance with safety policies.

- **bt_generator.py**
  Dynamically generates behavior trees in XML format by extracting behaviors from the SwarmAgent class, constructing a prompt, and querying a Llama-based model. The generated XML is saved to `tree.xml` for simulation use.

- **simulator_env.py**
  Powers the simulation environment, manages agent behaviors using XML-defined behavior trees, and handles real-time simulation updates.

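The module overview above corresponds to a short call chain. Below is a minimal sketch of that chain using the module-level functions the Gradio callbacks bind to (`translate_text`, `check_safety`, `generate_behavior_tree`); it assumes the quantized models download successfully, and each import loads a model, so it is illustrative rather than something to run casually.

```python
# Sketch of the text-input pipeline described in the README (assumption-laden:
# requires the repo's modules on the path and the GGUF models available).
import text_processing
import safety_module
import bt_generator

command = "Form a line at the target"
english = text_processing.translate_text(command)        # EuroLLM translation
verdict = safety_module.check_safety(english, False)     # False = keep LLaMA Guard enabled
if verdict == "Safe":
    tree_xml = bt_generator.generate_behavior_tree(english)  # also writes tree.xml
    print(tree_xml)
else:
    print(verdict)
```
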
app.py
ADDED
@@ -0,0 +1,321 @@
import gradio as gr
from pygame import Vector2
import time
import threading
import queue
from simulator_env import StreamableSimulation, SwarmAgent, MyConfig, MyWindow

import speech_processing
import text_processing
import safety_module
import bt_generator

from pathlib import Path

BASE = Path(__file__).parent

class GradioStreamer:
    _instance = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super(GradioStreamer, cls).__new__(cls)
            cls._instance.initialized = False
        return cls._instance

    def __init__(self):
        if not self.initialized:
            self.latest_frame = None
            self.running = True
            self.sim = None
            self.sim_thread = None
            self.initialized = True
            self.quit = False

    def update_frame(self, frame):
        self.latest_frame = frame

    def run_simulation(self):
        # Instantiate simulation and agents here:

        nest_pos = Vector2(450, 400)
        target_pos = Vector2(300, 200)

        agent_images = ["white.png", "green.png", "red circle.png"]
        image_paths = [str(BASE / "images" / fname) for fname in agent_images]


        # agent_images_paths = ["./images/white.png", "./images/green.png", "./images/red circle.png"]
        config = MyConfig(radius=25, visualise_chunks=True, movement_speed=2.0)
        self.sim = StreamableSimulation(config=config)
        loaded_agent_images = self.sim._load_image(image_paths)
        # loaded_agent_images = self.sim._load_image(agent_images_paths)

        # Create agents (each agent builds its own BT in its __init__)
        for _ in range(50):
            agents_pos = Vector2(450, 400)
            agent = SwarmAgent(
                images=loaded_agent_images,
                simulation=self.sim,
                pos=agents_pos,
                nest_pos=nest_pos,
                target_pos=target_pos
            )
            self.sim._agents.add(agent)
            self.sim._all.add(agent)
        # (Optionally spawn obstacles and sites.)
        self.sim.spawn_obstacle(str(BASE / "images" / "rect_obst.png"), 350, 50)
        self.sim.spawn_obstacle(str(BASE / "images" / "rect_obst (1).png"), 100, 350)

        self.sim.spawn_site(str(BASE / "images" / "rect.png"), 300, 200)
        self.sim.spawn_site(str(BASE / "images" / "nest.png"), 450, 400)


        start_time = time.time()  # Record the start time
        # while self.sim.running:
        #     self.sim.tick()
        #     for agent in self.sim._agents:
        #         agent.bt.tick_once()  # Continuously update BTs
        #     if not self.sim.frame_queue.empty():
        #         frame = self.sim.frame_queue.get()
        #         self.update_frame(frame)
        #     time.sleep(1/30)

        while self.running:
            self.sim.tick()

            if not self.sim.frame_queue.empty():
                frame = self.sim.frame_queue.get()
                self.update_frame(frame)

            time.sleep(1/30)  # Maintain a frame rate of ~30 FPS
            # Stop after 1 minute
            if time.time() - start_time >= 120:
                print("Simulation stopped after 1 minute.")
                break



    def stream(self):
        while True:
            if self.sim is not None and self.latest_frame is not None:
                yield self.latest_frame
            else:
                # Optionally, yield a blank image or None.
                yield None
            time.sleep(1/30)


    def start_simulation(self):
        """Start the simulation, creating a new thread if necessary."""
        if not self.sim_thread or not self.sim_thread.is_alive():
            self.running = True  # Reset running flag
            self.quit = False  # Reset quit flag
            self.latest_frame = None  # Clear out the old frame
            self.sim_thread = threading.Thread(target=self.run_simulation, daemon=True)
            self.sim_thread.start()


    def clear_frame_queue(self):
        if self.sim:
            try:
                while True:
                    self.sim.frame_queue.get_nowait()
            except queue.Empty:
                pass



    def stop_simulation(self):
        print("Stopping Simulation...")
        self.running = False
        self.quit = True
        if self.sim:
            for agent in self.sim._agents:
                agent.bt_active = False
            self.sim.running = False
            self.sim.stop()
            self.clear_frame_queue()
            self.sim = None
        if self.sim_thread and self.sim_thread.is_alive():
            self.sim_thread.join(timeout=2)
            print("Simulation thread terminated.")
        self.latest_frame = None  # Clear the displayed frame
        print("Simulation stopped successfully.")






def test(temp):
    return "test"

def test_safe(temp, checkbox):
    return "Safe"

def test_LLM_generate_BT(temp):
    print(temp)
    return None





def stop_gradio_interface():
    raise Exception("Simulation stopped!")


def create_gradio_interface():
    streamer = GradioStreamer()

    def on_translate_or_process():
        streamer.start_simulation()
        return gr.update(visible=True)

    def on_stop():
        print("Simulation on_stop")
        streamer.stop_simulation()
        return gr.update(visible=False)

    behaviors = bt_generator.call_behaviors()
    formatted_behaviors = "\n".join(f"- **{name}**: {doc.split('Returns:')[0].strip()}" for name, doc in behaviors.items())
    # formatted_behaviors = "Test"

    # Gradio Interface
    with gr.Blocks() as demo:
        gr.Markdown(
            """
            # 🐝 **SwarmChat:** Enabling Human–Swarm Interaction and Robot Control via Natural Language
            Easily talk to virtual robots, and see the result live.
            """
        )
        gr.Markdown(
            """
            **How it works**

            1. Speak or type a task in *any EU language* (e.g. “Find the target, then line up by colour”).
            2. Press **Start** to launch the simulator. Use **Stop** to halt & reset.
            3. SwarmChat translates your command, runs a safety check, and auto-builds a behaviour tree (BT).

            > The BT XML is shown on the right so you can copy / save it for real robots.
            """
        )
        with gr.Tabs():
            # Tab for microphone input
            with gr.Tab("Microphone Input"):
                gr.Markdown("## 🎙️ Voice mode")
                gr.Markdown("""
                Use your microphone to record audio instructions for the swarm. The system translates them into a robot-executable BT.
                """)
                with gr.Row():
                    with gr.Column():
                        microphone_input = gr.Audio(sources=["microphone"], type="filepath", label="🎙️ Record Audio")
                        safety_checkbox = gr.Checkbox(label="Turn off Safety Model")
                    with gr.Column():
                        output_text_audio = gr.Textbox(label="📄 Translated Instructions to English")
                        safty_check_audio = gr.Textbox(label="✅ Safety Check")


                translate_button_audio = gr.Button("Start")

                simulation_output = gr.Image(label="Live Stream", streaming=True, visible=False)
                stop_button = gr.Button("Stop")
                with gr.Row():
                    with gr.Column():
                        gr.Markdown(f"""**🛠 Primitive behaviours available.**\n{formatted_behaviors}\n\nThese are the only low-level actions/conditions the model is allowed to use yet.""")

                    with gr.Column():
                        generated_BT_audio = gr.Textbox(label="Generated behavior tree")

                translate_button_audio.click(
                    fn=speech_processing.translate_audio,
                    # fn=test,
                    inputs=microphone_input,
                    outputs=output_text_audio
                ).then(
                    fn=safety_module.check_safety,
                    # fn=test_safe,
                    inputs=[output_text_audio,safety_checkbox],
                    outputs=safty_check_audio
                ).then(
                    fn=lambda x: x if x == "Safe" else stop_gradio_interface(),
                    inputs=safty_check_audio,
                    outputs=None
                ).success(
                    fn=bt_generator.generate_behavior_tree,
                    # fn=test_LLM_generate_BT,
                    inputs=output_text_audio,
                    outputs=generated_BT_audio
                ).success(
                    fn=on_translate_or_process,
                    outputs=simulation_output
                )

                # stop_button.click(fn=on_stop, outputs=simulation_output)
                # stop_button.click(fn=on_stop, outputs=simulation_output)#.then(js="window.location.reload()")
                stop_button.click(fn=on_stop,outputs=simulation_output)#.then(js="window.location.reload()")
                demo.load(fn=streamer.stream, outputs=simulation_output)

            # Tab for text input
            with gr.Tab("📝 Text Input"):
                gr.Markdown("## 📝 Text mode")
                gr.Markdown("""
                Enter text-based instructions for the swarm. The system translates them into a robot-executable BT.
                """)
                with gr.Row():
                    with gr.Column():
                        text_input = gr.Textbox(lines=4, placeholder="Enter your instructions here...", label="📝 Input Text")
                        safety_checkbox_text = gr.Checkbox(label="Turn off Safety Model")
                    with gr.Column():
                        output_text_text = gr.Textbox(label="📄 Translated Instructions to English", lines=2)
                        safty_check_text = gr.Textbox(label="✅ Safety Check")

                process_button_text = gr.Button("Start")

                simulation_output = gr.Image(label="Live Stream", streaming=True, visible=False)
                stop_button = gr.Button("Stop")
                with gr.Row():
                    with gr.Column():
                        gr.Markdown(f"""**🛠 Primitive behaviours available.**\n{formatted_behaviors}\n\nThese are the only low-level actions/conditions the model is allowed to use yet.""")

                    with gr.Column():
                        generated_BT_text = gr.Textbox(label="Generated behavior tree")

                process_button_text.click(
                    fn=text_processing.translate_text,
                    # fn=test,
                    inputs=text_input,
                    outputs=output_text_text
                ).then(
                    fn=safety_module.check_safety,
                    # fn=test_safe,
                    inputs=[output_text_text,safety_checkbox_text],
                    outputs=safty_check_text
                ).then(
                    fn=lambda x: x if x == "Safe" else stop_gradio_interface(),
                    inputs=safty_check_text,
                    outputs=None
                ).success(
                    fn=bt_generator.generate_behavior_tree,
                    # fn=test_LLM_generate_BT,
                    inputs=output_text_text,
                    outputs=generated_BT_text
                ).success(
                    fn=on_translate_or_process,
                    outputs=simulation_output
                )
                stop_button.click(fn=on_stop,outputs=simulation_output)#.then(fn=reload_page,outputs=None ,js="window.location.reload()")
                # stop_button.click(fn=on_stop, outputs=simulation_output, js="window.location.reload()")
                demo.load(fn=streamer.stream, outputs=simulation_output)

    return demo

if __name__ == "__main__":
    demo = create_gradio_interface()
    try:
        demo.launch(server_port=7860, server_name="0.0.0.0")
    finally:
        streamer = GradioStreamer()
        streamer.stop_simulation()

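GradioStreamer holds one shared instance via `__new__`, so the click handlers created inside `create_gradio_interface` and the cleanup block in `__main__` operate on the same simulation thread. A stripped-down, standalone sketch of that idiom (no heavy imports; names here are illustrative, not from the repo):

```python
# Standalone illustration of the singleton pattern GradioStreamer uses:
# __new__ caches a single instance, and __init__ only initialises it once.
class SharedStreamer:
    _instance = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance.initialized = False
        return cls._instance

    def __init__(self):
        if not self.initialized:
            self.latest_frame = None
            self.running = True
            self.initialized = True

a = SharedStreamer()
b = SharedStreamer()
assert a is b                  # both names refer to the same object
b.latest_frame = "frame-1"
print(a.latest_frame)          # "frame-1": state is shared across call sites
```
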
bt_generator.py
ADDED
@@ -0,0 +1,121 @@
from simulator_env import SwarmAgent
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
import textwrap
import re

# Download only the behavior-tree model shard
model_path = hf_hub_download(
    repo_id="Inventors-Hub/SwarmChat-models",
    repo_type="model",
    filename="Falcon3-10B-Instruct-BehaviorTree-3epochs.Q4_K_M.gguf",
)


# llm = Llama(model_path=model_path, n_ctx=1024*4)
llm = Llama(
    model_path=model_path,
    n_ctx=1024*4,       # down from 4096
    low_vram=True,      # llama.cpp low-vram mode
    f16_kv=True,        # half-precision kv cache
    use_mmap=True,      # mmap file
    use_mlock=False,
)

def call_behaviors() -> dict:
    behavior_dict = {}
    for name, attribute in SwarmAgent.__dict__.items():
        if callable(attribute) and not name.startswith("_") \
                and not name.startswith("update") and not name.startswith("obstacle"):
            doc = attribute.__doc__
            if doc is not None:
                # Dedent, strip, and join into one line by replacing newlines and tabs
                cleaned_doc = " ".join(textwrap.dedent(doc).strip().split())
            else:
                cleaned_doc = ""
            behavior_dict[name] = cleaned_doc
    return behavior_dict

def extract_behavior_tree(response: str) -> str:
    """
    Extracts an XML behavior tree from the given response text.
    Looks for a block of XML enclosed in <root...</root> tags.
    """
    pattern = re.compile(r'(<root.*?</root>)', re.DOTALL)
    match = pattern.search(response)
    if match:
        return match.group(1).strip()
    else:
        # If no valid XML block is found, return the original response.
        return response.strip()

def save_behavior_tree(tree_xml: str, file_name: str = "tree.xml") -> None:
    """
    Saves the behavior tree XML to a file.
    """
    with open(file_name, "w", encoding="utf-8") as f:
        f.write(tree_xml)


def construct_prompt(prompt: str, prompt_type: str="one") -> str:

    behaviors = call_behaviors()
    behaviors_text = "\n".join(f"{name}: {doc}" for name, doc in behaviors.items())

    plan_prompt = f"""
<s>
<<SYS>>You are a helpful, respectful, and honest AI assistant. Your task is to generate well-structured XML code for behavior trees based on the provided instructions.<</SYS>>
INSTRUCTIONS: It is CRITICAL to use only the following behaviors structured as a dictionary: {behaviors_text} to construct behavior tree in XML format for the following command. Including any behavior that is not in the provided dictionary can result in damage to the agents and potentially humans, therefore you are not allowed to do so. AVOID AT ALL COSTS.
USER COMMAND: generate behavior tree to "{prompt}". Take a step back and think deeply about the behavior you need for this command. Consider the XML structure and the behaviors you use.
The output MUST follow this XML structure exactly, including:
- A root element with <root BTCPP_format and main_tree_to_execute attributes.
- A <BehaviorTree> element with an inner structure of Sequences, Fallback, Conditions, and Actions.
- A <TreeNodesModel> section listing all node models.
- No additional text or commentary outside the XML.
Output only the XML behavior tree without extra text.
OUTPUT:
"""

    if prompt_type == "zero":
        return plan_prompt
    elif prompt_type == "one":
        path = "Prompt_One_shot.txt"
        with open(path, "r", encoding="utf-8") as file:
            file_content = file.read()
        return f"{file_content} {plan_prompt}"
    elif prompt_type == "two":
        path = "Prompt_Two_Shot.txt"
        with open(path, "r", encoding="utf-8") as file:
            file_content = file.read()
        return f"{file_content} {plan_prompt}"
    else:
        raise ValueError("Unknown prompt type provided.")


def generate_behavior_tree(task_prompt: str) -> str:

    prompt = construct_prompt(task_prompt)

    print("\n\n", prompt, "\n\n")

    output = llm(
        prompt,
        temperature=0,
        max_tokens=1024,
        top_p=0.95,
        top_k=50,
        repeat_penalty=1.1
    )
    response = output.get("choices", [{}])[0].get("text", "").strip()
    tree_xml = extract_behavior_tree(response)
    save_behavior_tree(tree_xml)
    print("\n response: \n", response)
    return tree_xml


# Example usage:
if __name__ == "__main__":
    task = "Generate a behavior tree to just form a line."
    response = generate_behavior_tree(task)
    print("Generated behavior tree response:")
    print(response)

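`extract_behavior_tree` relies on a single non-greedy regex to pull the first `<root>…</root>` block out of whatever the model returns. A self-contained check of that behaviour with a hypothetical model reply (the behavior name `form_line` is made up for illustration; no model is needed):

```python
import re

# Same pattern as extract_behavior_tree; DOTALL lets it span multiple lines.
pattern = re.compile(r'(<root.*?</root>)', re.DOTALL)

# Hypothetical reply mixing commentary with the XML payload.
reply = (
    "Sure, here is the tree:\n"
    '<root BTCPP_format="3" main_tree_to_execute="Demo">\n'
    '  <BehaviorTree ID="Demo"><form_line/></BehaviorTree>\n'
    "</root>\n"
    "Let me know if you need changes."
)

match = pattern.search(reply)
print(match.group(1).strip() if match else reply.strip())
# Only the <root>...</root> block is printed; the surrounding prose is dropped,
# which is why the generated tree.xml stays parseable even for chatty replies.
```
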
parser.py
ADDED
@@ -0,0 +1,285 @@
import xml.etree.ElementTree as ET
from typing import List, Dict
import py_trees as pt
# from simulator_env import SwarmAgent

########################################################################
# 1. XML Parsing Classes and Functions
########################################################################

class Node:
    """
    A generic node representing a behavior tree element.
    It holds:
      - tag: the element's tag (e.g., "Sequence", "say", "SubTree", etc.)
      - attributes: a dict of the element's attributes (e.g., name, num_cycles, port values)
      - children: a list of child Node instances (which may be other behaviors or sub-elements)
      - ports: a dict grouping any port definitions found as child elements (input_port, output_port, inout_port)
    """
    def __init__(self, tag: str, attributes: Dict[str, str]):
        self.tag = tag
        self.attributes = attributes.copy()
        self.children: List['Node'] = []
        self.ports: Dict[str, List[Dict[str, str]]] = {}

    def __repr__(self):
        return (f"Node(tag={self.tag!r}, attributes={self.attributes!r}, "
                f"children={self.children!r}, ports={self.ports!r})")


def parse_node(element: ET.Element) -> Node:
    """
    Recursively parse an XML element into a Node.
    This function:
      - Reads the element's tag and attributes.
      - Checks for child elements that define ports (input_port, output_port, inout_port) and stores them.
      - Recursively parses any other child elements as behavior nodes.
    """
    node = Node(element.tag, element.attrib)

    for child in element:
        # Check if the child defines a port (this covers the new "inout_port" as well)
        if child.tag in ['input_port', 'output_port', 'inout_port']:
            if child.tag not in node.ports:
                node.ports[child.tag] = []
            node.ports[child.tag].append(child.attrib)
        else:
            # Otherwise, treat the child as a regular behavior node.
            child_node = parse_node(child)
            node.children.append(child_node)

    return node


def parse_behavior_trees(xml_file: str) -> List[Node]:
    """
    Parses the given XML file and returns a list of BehaviorTree nodes.
    Each <BehaviorTree> element is considered a complete behavior tree (or subtree).
    """
    tree = ET.parse(xml_file)
    root = tree.getroot()

    behavior_trees = []
    for bt_elem in root.findall('BehaviorTree'):
        bt_node = parse_node(bt_elem)
        behavior_trees.append(bt_node)
    return behavior_trees

########################################################################
# 2. Functions that will be executed by the BT (your actions, conditions, etc.)
########################################################################


def get_function_mapping():
    from simulator_env import SwarmAgent
    mapping = {
        name: func
        for name, func in SwarmAgent.__dict__.items()
        if callable(func) and not name.startswith("_") and name not in ['update','_inject_agent','obstacle','_speak']
    }
    # print("mapping: \n", mapping)
    return mapping

########################################################################
# 3. Helpers and Custom py_trees Behavior Wrappers
########################################################################

def convert_param(val: str):
    """
    Attempt to convert a string parameter to int or float if possible.
    Otherwise, return the string.
    """
    try:
        return int(val)
    except ValueError:
        try:
            return float(val)
        except ValueError:
            return val




# A simple leaf node that wraps a function call.
class FunctionAction(pt.behaviour.Behaviour):
    def __init__(self, name, function, params):
        super(FunctionAction, self).__init__(name=name)
        self.function = function
        self.params = params
        self.agent = None  # Will be set later

    def update(self):
        # Pass the agent (context) into the function
        status = self.function(self.agent, **self.params)
        return status

# A decorator node that wraps a child behavior and calls a function.
class FunctionDecorator(pt.decorators.Decorator):
    def __init__(self, name, function, params, child):
        super(FunctionDecorator, self).__init__(name=name, child=child)
        self.function = function
        self.params = params

    def update(self):
        # Ensure the child is updated.
        self.decorated.tick_once()
        child_status = self.decorated.status
        # Call the decorator function (for side effects)
        self.function(**self.params)
        # For this example, we simply pass through the child's status.
        return child_status

# A control node that has one child and then calls a function.
class FunctionControl(pt.behaviour.Behaviour):
    def __init__(self, name, function, params, child):
        super(FunctionControl, self).__init__(name=name)
        self.function = function
        self.params = params
        self.child = child

    def update(self):
        self.child.tick_once()
        return self.function(**self.params)

# Define an AlwaysSuccess behavior to use when an unknown node is encountered.
class AlwaysSuccess(pt.behaviour.Behaviour):
    def __init__(self, name="AlwaysSuccess"):
        super(AlwaysSuccess, self).__init__(name=name)

    def update(self):
        return pt.common.Status.SUCCESS

########################################################################
# 4. Convert the Parsed Node Tree into a py_trees Behavior Tree
########################################################################

def build_behavior(node: Node, subtree_mapping: Dict[str, Node]) -> pt.behaviour.Behaviour:
    """
    Recursively converts a parsed Node (from XML) into a py_trees behavior.
    """
    # Special case: unwrap the BehaviorTree container.
    if node.tag == "BehaviorTree":
        if node.children:
            return build_behavior(node.children[0], subtree_mapping)
        else:
            return AlwaysSuccess(name="Empty BehaviorTree")

    # Define which tags represent which kinds of nodes.
    composite_tags = ["Sequence", "Fallback"]
    repeat_tags = ["Repeat"]
    decorator_tags = ["Inverter","AlwaysFailure", "AlwaysSuccess"]
    control_tags = []

    mapping = get_function_mapping()


    if node.tag == "Sequence":
        composite = pt.composites.Sequence(
            name=node.attributes.get('name', 'Sequence'),
            memory=True  # Added memory parameter
        )
        for child in node.children:
            composite.add_child(build_behavior(child, subtree_mapping))
        return composite

    elif node.tag == "Fallback":
        composite = pt.composites.Selector(
            name=node.attributes.get('name', 'Fallback'),
            memory=True  # Added memory parameter
        )
        for child in node.children:
            composite.add_child(build_behavior(child, subtree_mapping))
        return composite

    elif node.tag in repeat_tags:
        if len(node.children) != 1:
            print("Repeat node must have exactly one child!")
        child_behavior = build_behavior(node.children[0], subtree_mapping)
        # Read the number of cycles from the XML; default to 1 if not provided.
        num_cycles = int(node.attributes.get('num_cycles', 1))
        # Create the Repeat decorator, providing the required 'num_success' parameter.
        repeat_decorator = pt.decorators.Repeat(
            name=node.attributes.get('name', 'Repeat'),
            child=child_behavior,
            num_success=num_cycles  # Provide the required parameter here.
        )
        return repeat_decorator


    elif node.tag == "SubTree":
        subtree_id = node.attributes.get('ID')
        if subtree_id in subtree_mapping:
            return build_behavior(subtree_mapping[subtree_id], subtree_mapping)
        else:
            print(f"SubTree with ID {subtree_id} not found!")
            return AlwaysSuccess(name="Missing SubTree")

    elif node.tag in decorator_tags:
        if len(node.children) != 1:
            print("Decorator node must have exactly one child!")
        child_behavior = build_behavior(node.children[0], subtree_mapping)
        params = {k: convert_param(v) for k, v in node.attributes.items() if k != "name"}
        return FunctionDecorator(
            name=node.attributes.get('name', node.tag),
            function=mapping[node.tag],
            params=params,
            child=child_behavior
        )

    elif node.tag in control_tags:
        if len(node.children) != 1:
            print("Control node must have exactly one child!")
        child_behavior = build_behavior(node.children[0], subtree_mapping)
        params = {k: convert_param(v) for k, v in node.attributes.items() if k != "name"}
        return FunctionControl(
            name=node.attributes.get('name', node.tag),
            function=mapping[node.tag],
            params=params,
            child=child_behavior
        )

    else:
        if node.tag in mapping:
            params = {k: convert_param(v) for k, v in node.attributes.items() if k != "name"}
            return FunctionAction(
                name=node.attributes.get('name', node.tag),
                function=mapping[node.tag],
                params=params
            )
        else:
            return AlwaysSuccess(name=node.attributes.get('name', node.tag))


########################################################################
# 5. Main: Parse XML, Build the py_trees Tree, and Execute It
########################################################################

def print_node(node, indent=0):
    ind = " " * indent
    print(f"{ind}{node.tag}: {node.attributes}")
    # Optionally print any ports
    for port_type, port_list in node.ports.items():
        for port in port_list:
            print(f"{ind} {port_type}: {port}")
    for child in node.children:
        print_node(child, indent + 1)

# Usage in your main:
if __name__ == "__main__":
    file_path = 'tree.xml'
    trees = parse_behavior_trees(file_path)
    for tree in trees:
        print_node(tree)

# if __name__ == "__main__":
#     # The XML file with your behavior tree.
#     file_path = 'tree.xml'

#     # 1. Parse the XML into a list of BehaviorTree nodes.
#     trees = parse_behavior_trees(file_path)
#     # Build a mapping of BehaviorTree ID to Node.
#     print(trees)
#     subtree_mapping = { tree.attributes.get("ID"): tree for tree in trees }
#     print()

#     print(subtree_mapping)

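The commented-out main block above hints at the intended wiring: parse the XML, build an ID-to-subtree map, then hand a tree to `build_behavior`. A minimal sketch of that wiring, assuming `tree.xml` exists and the repo is on the path (`build_behavior` resolves leaf tags against `SwarmAgent` methods, and `FunctionAction.agent` stays `None` until an agent is attached, so this only inspects the structure rather than ticking leaf actions):

```python
# Sketch only; mirrors the commented-out main block rather than an official entry point.
# Run from the repository root so the local parser.py module is picked up.
import py_trees as pt
from parser import parse_behavior_trees, build_behavior

trees = parse_behavior_trees("tree.xml")
subtree_mapping = {t.attributes.get("ID"): t for t in trees}

root_behavior = build_behavior(trees[0], subtree_mapping)
print(pt.display.ascii_tree(root_behavior))  # show the assembled py_trees structure
```
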
requirements.txt
ADDED
@@ -0,0 +1,15 @@
huggingface-hub==0.33.0
gradio==5.12.0
numpy==1.26.4
torch==2.5.1
transformers==4.48.0
llama_cpp_python==0.3.9
pydub==0.25.1
accelerate==1.2.1
pygame==2.6.1
violet-simulator==0.2.4
pyttsx3==2.98
py_trees==2.3.0

blobfile==3.0.0
sentencepiece==0.2.0

safety_module.py
ADDED
@@ -0,0 +1,81 @@
from llama_cpp import Llama
from huggingface_hub import hf_hub_download


# Download only the safety model shard from the model repo
model_path = hf_hub_download(
    repo_id="Inventors-Hub/SwarmChat-models",
    repo_type="model",
    filename="llama-guard-3-8b-q4_k_m.gguf",
)


# Load the Llama model for safety classification
# model_path = r".\models\llama-guard-3-8b-q4_k_m.gguf"


# llm = Llama(model_path=model_path, n_ctx=1024)
llm = Llama(
    model_path=model_path,
    n_ctx=512,        # down from 4096
    low_vram=True,    # llama.cpp low-vram mode
    f16_kv=True,      # half-precision kv cache
    use_mmap=True,    # mmap file
    use_mlock=False,
)


def check_safety(text, checkbox):

    if checkbox:
        return "Safe"

    categories = {
        "S1": "Violent Crimes.",
        "S2": "Non-Violent Crimes.",
        "S3": "Sex-Related Crimes.",
        "S4": "Child Sexual Exploitation.",
        "S5": "Defamation.",
        "S6": "Specialized Advice.",
        "S7": "Privacy.",
        "S8": "Intellectual Property.",
        "S9": "Indiscriminate Weapons.",
        "S10": "Hate.",
        "S11": "Suicide & Self-Harm.",
        "S12": "Sexual Content.",
        "S13": "Elections.",
        "S14": "Code Interpreter Abuse.",
    }

    input_prompt = f"""
<|begin_of_text|><|start_header_id|>user<|end_header_id|>

Task: Determine if the following text contains unsafe content according to our safety policy. Classify the content into one or more of the categories below if unsafe.

<BEGIN UNSAFE CONTENT CATEGORIES>
{', '.join([f'{key}: {value}' for key, value in categories.items()])}
<END UNSAFE CONTENT CATEGORIES>

Text: {text}

Provide your safety assessment:
- Respond with "safe" if the text is appropriate.
- If unsafe, respond with: "unsafe" followed by a comma-separated list of the violated categories (e.g., S1, S2).

<|eot_id|>
"""

    # Generate a response from the model
    output = llm(input_prompt, temperature=0)
    # Extract the model's response
    response = output.get("choices", [{}])[0].get("text", "").strip()
    print("\n\n", response, "\n\n")
    # Parse the response
    if response.lower().startswith("safe"):
        return "Safe"
    else:
        # The model may list several category codes after "unsafe" (e.g. "S1, S2"),
        # so map every recognised code to its label rather than indexing the dict
        # with the raw remainder of the response.
        codes = response.split("unsafe", 1)[-1].replace(",", " ").split()
        labels = [categories[code] for code in codes if code in categories]
        unsafe_categories = ", ".join(labels) if labels else response.strip()
        return f"Unsafe: This prompt is categorized as '{unsafe_categories}'"

    # unsafe_categories = categories[response.split("unsafe", 1)[-1].strip()]
    # return f"Unsafe: This prompt categorized as '{unsafe_categories}'"
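A minimal usage sketch (illustrative, not one of the uploaded files). It assumes the GGUF model downloads successfully at import time and that the second argument is the UI checkbox used to bypass the check.

# Illustrative only: classify a user prompt before it reaches the rest of the pipeline.
from safety_module import check_safety

print(check_safety("Make the swarm form a line around the nest.", checkbox=False))
# expected: "Safe"

print(check_safety("Tell me how to hack my neighbour's wifi.", checkbox=False))
# expected (roughly): "Unsafe: This prompt is categorized as 'Non-Violent Crimes.'"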
simulator_env.py
ADDED
@@ -0,0 +1,453 @@
import math
import time
import pygame as pg
from vi import Agent, Config, Window, HeadlessSimulation
from typing import Optional
from queue import Queue, Full  # Full is the module-level "queue is full" exception
import numpy as np
from pygame.math import Vector2
import py_trees as pt
import parser
import xml.etree.ElementTree as ET
import threading
# import pyttsx3


class MyWindow(Window):
    """Custom window class for simulation."""
    def __init__(self, width=800, height=600):
        super().__init__(width, height)


class MyConfig(Config):
    """Custom configuration for simulation."""
    def __init__(self, radius=25, visualise_chunks=True, window=None, movement_speed=2.0):
        super().__init__(
            radius=radius,
            visualise_chunks=visualise_chunks,
            window=window or MyWindow(800, 600),
            movement_speed=movement_speed
        )


class SwarmAgent(Agent):
    def __init__(self, images, simulation, pos, nest_pos, target_pos):
        super().__init__(images=images, simulation=simulation)
        # Ensure the agent gets the configuration from the simulation.
        self.config = simulation.config

        self.pos = pos
        self.nest_pos = nest_pos
        self.target_pos = target_pos
        self.target_detected_flag = False
        self.target_reached_flag = False
        self.is_agent_in_nest_flag = False
        self.obstacle_radius = 3
        self.state = "seeking"
        self.bt_active = True  # Add a flag
        # self.tts_engine = pyttsx3.init()  # Initialize text-to-speech engine

        file_path = "tree.xml"
        trees = parser.parse_behavior_trees(file_path)
        subtree_mapping = {tree.attributes.get("ID"): tree for tree in trees}

        xml_tree = ET.parse(file_path)
        xml_root = xml_tree.getroot()
        main_tree_id = xml_root.attrib.get("main_tree_to_execute")

        if not main_tree_id or main_tree_id not in subtree_mapping:
            raise ValueError("Main tree not found in the XML!")
        main_tree_node = subtree_mapping[main_tree_id]

        # Build the py_trees tree:
        self.bt = parser.build_behavior(main_tree_node, subtree_mapping)

        # Inject the agent instance into all leaf behaviors.
        self._inject_agent(self.bt)

    def _inject_agent(self, node):
        """Recursively set the agent for any custom BT nodes."""
        if hasattr(node, "agent"):
            node.agent = self
        if hasattr(node, "children"):
            for child in node.children:
                self._inject_agent(child)

    def update(self):
        if self.bt_active:
            self.bt.tick_once()

    def obstacle(self):
        """
        Check for obstacle intersections within a predefined radius.
        Returns: True if an obstacle is detected within the radius, False otherwise.
        """
        for intersection in self.obstacle_intersections(scale=self.obstacle_radius):
            return True
        return False

    # def update(self):
    #     self.bt.tick_once()
    #     # self.root_node.run(self)

    # def say(self, message: str):
    #     """
    #     Action Node: Speak the provided message using text-to-speech if it hasn't been spoken before.
    #     Args: message (str): The message to be spoken.
    #     Returns: Always returns SUCCESS, indicating the action was executed.
    #     """
    #     if not hasattr(self, 'old_message'):
    #         self.old_message = []

    #     # Only speak the message if it has not been spoken before (i.e. not in old_message)
    #     if message not in self.old_message:
    #         self.tts_engine.say(message)
    #         self.tts_engine.runAndWait()
    #         self.old_message.append(message)

    #     return pt.common.Status.SUCCESS

    def flocking(self):
        """
        Action Node: Adjust the agent's move vector by blending alignment and separation forces from nearby agents.
        Returns: Always returns SUCCESS, indicating the action was executed.
        """
        nearby_agents = list(self.in_proximity_accuracy().without_distance())
        if not nearby_agents:
            return pt.common.Status.SUCCESS

        alignment = Vector2(0, 0)
        separation = Vector2(0, 0)
        separation_count = 0

        # Desired minimum separation distance (adjust as needed)
        separation_threshold = 3

        # Calculate alignment and separation contributions.
        for other in nearby_agents:
            alignment += other.move

            diff = self.pos - other.pos
            distance = diff.length()
            if 0 < distance < separation_threshold:
                # The closer the neighbor, the stronger the repulsive force.
                separation += diff.normalize() * (separation_threshold - distance)
                separation_count += 1

        # Average the alignment vector over all neighbors.
        alignment /= len(nearby_agents)

        # If any agents are too close, average the separation vector.
        if separation_count > 0:
            separation /= separation_count

        # Blend the two influences. Here, alignment has a stronger influence than separation.
        # Adjust the blend factor (e.g., 0.3) to control separation influence.
        blended_force = alignment.lerp(separation, 0.3)

        # Smoothly blend the current move with the blended force.
        self.move = self.move.lerp(blended_force, 0.5)

        # Normalize and scale to the configured movement speed.
        if self.move.length() > 0:
            self.move = self.move.normalize() * self.config.movement_speed

        # Update position and apply wrap-around if necessary.
        self.pos += self.move
        self.there_is_no_escape()

        return pt.common.Status.SUCCESS

    def align_with_swarm(self):
        """
        Action Node: Align the agent's move vector with the average movement of nearby agents.
        Returns: Always returns SUCCESS, indicating the action was executed.
        """
        nearby_agents = list(self.in_proximity_accuracy().without_distance())
        if not nearby_agents:
            return pt.common.Status.SUCCESS

        avg_direction = Vector2(0, 0)
        for other in nearby_agents:
            avg_direction += other.move
        avg_direction /= len(nearby_agents)

        # Blend current movement with average direction.
        self.move = self.move.lerp(avg_direction, 0.5)
        if self.move.length() > 0:
            self.move = self.move.normalize() * self.config.movement_speed

        # Update position and wrap-around if necessary.
        self.pos += self.move
        self.there_is_no_escape()

        return pt.common.Status.SUCCESS

    def is_obstacle_detected(self):
        """
        Condition node: Determine if any obstacles are detected in the vicinity of the agent.
        Returns: SUCCESS if an obstacle is detected, FAILURE otherwise.
        """
        if self.obstacle():
            return pt.common.Status.SUCCESS
        else:
            return pt.common.Status.FAILURE

    def avoid_obstacle(self):
        """
        Action node: Execute an action to avoid detected obstacles.
        Returns: Always returns SUCCESS, indicating the action was executed.
        """
        self.move.rotate_ip(180)
        return pt.common.Status.SUCCESS

    def is_target_detected(self):
        """
        Condition node: Check if the target is within a detectable distance from the agent's position.
        Returns: SUCCESS if the target is within 20 units of distance, FAILURE otherwise.
        """
        distance = math.dist(self.target_pos, self.pos)
        if distance <= 20:
            self.target_detected_flag = True
        if self.target_detected_flag:
            return pt.common.Status.SUCCESS
        return pt.common.Status.FAILURE

    def is_target_reached(self):
        """
        Condition node: Check if the agent has reached the target.
        Returns: SUCCESS if the target is within 15 units of distance, FAILURE otherwise.
        """
        distance = math.dist(self.target_pos, self.pos)
        if distance <= 15:
            self.target_reached_flag = True
        if self.target_reached_flag:
            return pt.common.Status.SUCCESS
        return pt.common.Status.FAILURE

    def change_color(self, color):
        """
        Action Node: Change the agent's color to 'white', 'green', or 'red'.
        Args: color (str): Color name.
        Returns: Always returns SUCCESS, indicating the action was executed.
        """
        color = color.lower()
        if color == "white":
            self.change_image(0)
        elif color == "green":
            self.change_image(1)
        elif color == "red":
            self.change_image(2)

        return pt.common.Status.SUCCESS

    def is_agent_in_nest(self):
        """
        Condition node: Determine if the agent is in the nest.
        Returns: SUCCESS if the agent is in the nest, FAILURE otherwise.
        """
        distance = math.dist(self.nest_pos, self.pos)
        if distance <= 17 and (self.target_reached_flag or self.target_detected_flag or self.state == "completed"):
            self.state = "seeking"
            # self.target_detected_flag = False
            # self.target_reached_flag = False
            self.is_agent_in_nest_flag = True

        if self.is_agent_in_nest_flag:
            return pt.common.Status.SUCCESS
        return pt.common.Status.FAILURE

    def agent_movement_freeze(self):
        """
        Action node: Freeze the agent's movement, typically to indicate a stop in activity.
        Returns: Always returns SUCCESS, indicating the action was executed.
        """
        self.freeze_movement()
        return pt.common.Status.SUCCESS

    def continue_movement_agent(self):
        """
        Action node: Continue the agent's movement after it has been previously frozen.
        Returns: Always returns SUCCESS, indicating the action was executed.
        """
        self.continue_movement()
        return pt.common.Status.SUCCESS

    def move_randomly(self):
        """
        Action node: Perform a wandering action where the agent moves randomly within the environment.
        Returns: Always returns SUCCESS, indicating the action was executed.
        """
        Agent.change_position(self)
        return pt.common.Status.SUCCESS

    def is_path_clear(self):
        """
        Condition node: Check if the path ahead of the agent is clear of obstacles.
        Returns: SUCCESS if no obstacles are detected ahead, FAILURE if obstacles are present.
        """
        # return not self.obstacle()

        if not self.obstacle():
            return pt.common.Status.SUCCESS
        else:
            return pt.common.Status.FAILURE

    def is_line_formed(self):
        """
        Condition node: Determine if the agent has formed a line with a reference point at the center of the window.
        Returns: SUCCESS if the line is formed with the center, FAILURE otherwise.
        """
        center_x = self.config.window.width / 2
        direction = Vector2(center_x, self.pos.y) - self.pos
        if direction.length() > 0.5:
            return pt.common.Status.FAILURE
        return pt.common.Status.SUCCESS

    def form_line(self):
        """
        Action node: Direct the agent to form a line towards the center of the window. This function adjusts the agent's position to align it with the center.
        Returns: Always returns SUCCESS, indicating the action was executed.
        """
        # print("form_line")
        center_x = self.config.window.width / 2
        direction = Vector2(center_x, self.pos.y) - self.pos
        if direction.length() > 0.5:
            direction.scale_to_length(self.config.movement_speed)
            self.pos += direction
        return pt.common.Status.SUCCESS

    # def task_completed(self):
    #     """
    #     Action node: Signal that the agent has completed its designated task. Returns: Always returns True, indicating that the task completion action was executed.
    #     """
    #     self.state = "completed"
    #     return pt.common.Status.SUCCESS


class StreamableSimulation(HeadlessSimulation):
    """Modified Simulation class that captures frames for streaming."""
    def __init__(self, config: Optional[Config] = None):
        super().__init__(config)
        pg.init()

        size = self.config.window.as_tuple()
        self._screen = pg.Surface(size, pg.SRCALPHA)
        self._background = pg.Surface(size, pg.SRCALPHA)
        self._background.fill((0, 0, 0))

        self.frame_queue = Queue(maxsize=30)
        self.running = True
        self._frame_lock = threading.Lock()

    def get_frame(self):
        with self._frame_lock:
            surf_copy = self._screen.copy()
            frame = np.array(pg.surfarray.pixels3d(surf_copy))
            return np.transpose(frame, (1, 0, 2))

    def tick(self):
        """Run a simulation step and capture frames."""
        super().tick()

        with self._frame_lock:
            self._screen.blit(self._background, (0, 0))
            for sprite in self._all.sprites():
                self._screen.blit(sprite.image, sprite.rect)

        try:
            frame = self.get_frame()
            self.frame_queue.put(frame, block=False)
        except Full:
            print("Frame queue is full. Dropping frame.")

    # def _load_image(self, path: str) -> pg.surface.Surface:
    #     """Load an image from the given path."""
    #     return pg.image.load(path)

    def _load_image(self, paths):
        """Load one or more images from given paths."""
        if isinstance(paths, str):  # If it's a single string, load normally
            return pg.image.load(paths)
        elif isinstance(paths, list):  # If it's a list, load all images
            return [pg.image.load(path) for path in paths]
        raise TypeError("Expected a string (file path) or a list of file paths")

    def stop(self):
        """Stop the simulation."""
        # Do not try to call self.bt.stop() because simulation does not own a BT.
        # self.running = False
        super().stop()
        pg.quit()  # Quit the Pygame environment


# if __name__=="__main__":

#     # Define nest and target positions
#     nest_x, nest_y = 450, 400
#     target_x, target_y = 200, 100
#     nest_pos = Vector2(nest_x, nest_y)
#     target_pos = Vector2(target_x, target_y)

#     # Load images for agents
#     agent_images_paths = ["./images/white.png", "./images/green.png", "./images/red circle.png"]

#     config = MyConfig(radius=250, visualise_chunks=True, movement_speed=2)
#     sim = StreamableSimulation(config=config)

#     # Load images
#     loaded_agent_images = sim._load_image(agent_images_paths)

#     # Initialize agents with behavior tree parsing
#     for _ in range(50):
#         agent = SwarmAgent(
#             images=loaded_agent_images,
#             simulation=sim,
#             pos=Vector2(nest_x, nest_y),
#             nest_pos=nest_pos,
#             target_pos=target_pos,
#         )
#         sim._agents.add(agent)
#         sim._all.add(agent)
#     # Draw environment elements
#     sim.spawn_obstacle("./images/rect_obst.png", 350, 100)
#     sim.spawn_obstacle("./images/rect_obst (1).png", 100, 350)
#     sim.spawn_site("./images/rect.png", target_x, target_y)
#     sim.spawn_site("./images/nest.png", nest_x, nest_y)

#     for agent in sim._agents:
#         agent.bt.tick_once()

#     # Then run your simulation loop without ticking the BT further.
#     while sim.running:
#         sim.tick()
#         if not sim.frame_queue.empty():
#             frame = sim.frame_queue.get()
#             # update_frame(frame) or display the frame as needed.
#         time.sleep(1/30)
speech_processing.py
ADDED
@@ -0,0 +1,40 @@
| 1 |
+
from transformers import SeamlessM4Tv2Model, AutoProcessor
|
| 2 |
+
import numpy as np
|
| 3 |
+
import torch
|
| 4 |
+
from pydub import AudioSegment
|
| 5 |
+
|
| 6 |
+
# Load processor and model
|
| 7 |
+
processor = AutoProcessor.from_pretrained("facebook/seamless-m4t-v2-large")
|
| 8 |
+
model = SeamlessM4Tv2Model.from_pretrained("facebook/seamless-m4t-v2-large")
|
| 9 |
+
|
| 10 |
+
def translate_audio(audio_file):
|
| 11 |
+
if audio_file is None:
|
| 12 |
+
return "No audio file detected. Please try again."
|
| 13 |
+
|
| 14 |
+
try:
|
| 15 |
+
# Set the device (use GPU if available)
|
| 16 |
+
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 17 |
+
model.to(device)
|
| 18 |
+
|
| 19 |
+
# Reset audio file pointer and load audio
|
| 20 |
+
audio = AudioSegment.from_file(audio_file, format="wav")
|
| 21 |
+
audio = audio.set_frame_rate(16000).set_channels(1)
|
| 22 |
+
|
| 23 |
+
# Convert audio to float32 NumPy array
|
| 24 |
+
audio_array = np.array(audio.get_array_of_samples()).astype(np.float32) / 32768.0
|
| 25 |
+
|
| 26 |
+
# Process input
|
| 27 |
+
audio_inputs = processor(audios=audio_array, sampling_rate=16000, return_tensors="pt")
|
| 28 |
+
audio_inputs = {key: val.to(device) for key, val in audio_inputs.items()} # Ensure tensors are on the correct device
|
| 29 |
+
|
| 30 |
+
# Generate translation
|
| 31 |
+
output_tokens = model.generate(**audio_inputs, tgt_lang="eng", generate_speech=False)
|
| 32 |
+
|
| 33 |
+
# Extract token IDs from the generated output
|
| 34 |
+
token_ids = output_tokens.sequences
|
| 35 |
+
# Decode token IDs to text
|
| 36 |
+
translated_text_from_audio = processor.batch_decode(token_ids, skip_special_tokens=True)[0]
|
| 37 |
+
|
| 38 |
+
return translated_text_from_audio
|
| 39 |
+
except Exception as e:
|
| 40 |
+
return f"Error during audio translation: {e}"
|
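A brief usage sketch (illustrative, not one of the uploaded files; the file path is a placeholder for a user recording):

# Illustrative only: translate a spoken command to English text.
from speech_processing import translate_audio

wav_path = "recording.wav"   # placeholder path to a WAV recording
print(translate_audio(wav_path))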
text_processing.py
ADDED
@@ -0,0 +1,40 @@
from llama_cpp import Llama
from huggingface_hub import hf_hub_download

# Download the single GGUF shard by its repo path:
model_path = hf_hub_download(
    repo_id="Inventors-Hub/SwarmChat-models",
    repo_type="model",
    filename="EuroLLM-9B-Instruct-Q4_K_M.gguf",
)

# llm = Llama(model_path=model_path, n_ctx=1024)#, verbose=True)
llm = Llama(
    model_path=model_path,
    n_ctx=512,        # down from 4096
    low_vram=True,    # llama.cpp low-vram mode
    f16_kv=True,      # half-precision kv cache
    use_mmap=True,    # mmap file
    use_mlock=False,
)
# print("Llama backend initialized successfully!")


# Function to process text using EuroLLM
def translate_text(text):
    input_prompt = f"""
<|im_start|>system
<|im_end|>
<|im_start|>user
Translate the following text to English:
Text: {text}
English:
<|im_end|>
<|im_start|>assistant
"""
    output = llm(input_prompt, max_tokens=1024, temperature=0)

    translated_text = output.get("choices", [{}])[0].get("text", "").strip()

    return translated_text
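A short usage sketch (illustrative, not one of the uploaded files; importing the module first downloads and loads the GGUF model):

# Illustrative only: normalise a non-English command to English before further processing.
from text_processing import translate_text

print(translate_text("Forme une ligne au centre de la fenêtre."))
# expected output, roughly: "Form a line at the centre of the window."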
tree.xml
ADDED
File without changes