Commit · d69447e
Parent(s): none (initial commit)

Initial commit: FleetMind MCP with GitHub Actions auto-sync

- Complete FleetMind AI Dispatch Coordinator project
- Autonomous agent with Gemini 2.0 Flash
- Natural language order and driver creation
- GitHub Actions workflow for auto-sync to HF Space
- Team collaboration setup
MCP 1st Birthday Hackathon - Track: MCP in Action
- .env.example +28 -0
- .github/workflows/check-file-size.yml +16 -0
- .github/workflows/sync-to-huggingface.yml +24 -0
- .gitignore +59 -0
- DRIVER_CREATION_GUIDE.md +318 -0
- GITHUB_ACTIONS_SETUP.md +315 -0
- QUICK_START.md +138 -0
- README.md +195 -0
- TEAM_COLLABORATION_GUIDE.md +305 -0
- app.py +27 -0
- chat/__init__.py +9 -0
- chat/chat_engine.py +108 -0
- chat/conversation.py +85 -0
- chat/geocoding.py +131 -0
- chat/providers/__init__.py +10 -0
- chat/providers/base_provider.py +53 -0
- chat/providers/claude_provider.py +271 -0
- chat/providers/gemini_provider.py +551 -0
- chat/tools.py +350 -0
- database/__init__.py +8 -0
- database/connection.py +219 -0
- database/migrations/001_create_drivers_table.py +81 -0
- database/migrations/__init__.py +3 -0
- database/schema.py +212 -0
- flow.md +828 -0
- requirements.txt +27 -0
- scripts/init_db.py +115 -0
- scripts/test_db.py +169 -0
- ui/__init__.py +4 -0
- ui/app.py +542 -0
.env.example
ADDED
@@ -0,0 +1,28 @@
# AI Provider Selection (choose one: "anthropic" or "gemini")
AI_PROVIDER=anthropic

# API Keys for AI Providers
ANTHROPIC_API_KEY=your_anthropic_api_key_here
GOOGLE_API_KEY=your_google_api_key_here

# HERE Maps API Key (for geocoding)
HERE_API_KEY=your_here_api_key_here

# PostgreSQL Database Configuration
DB_HOST=localhost
DB_PORT=5432
DB_NAME=fleetmind
DB_USER=postgres
DB_PASSWORD=your_password_here

# MCP Server
MCP_SERVER_NAME=dispatch-coordinator-mcp
MCP_SERVER_VERSION=1.0.0

# Gradio
GRADIO_SERVER_PORT=7860
GRADIO_SHARE=false

# Logging
LOG_LEVEL=INFO
LOG_FILE=logs/fleetmind.log
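These variables are read by the application at startup. A minimal sketch of how they might be loaded, assuming the project uses `python-dotenv` for `.env` loading (an assumption; the real loading code lives in the project's own modules):

```python
# config_sketch.py - hypothetical loader, not part of this commit
import os
from dotenv import load_dotenv  # assumes python-dotenv is available

load_dotenv()  # read variables from .env in the working directory

AI_PROVIDER = os.getenv("AI_PROVIDER", "anthropic")  # "anthropic" or "gemini"
DB_CONFIG = {
    "host": os.getenv("DB_HOST", "localhost"),
    "port": int(os.getenv("DB_PORT", "5432")),
    "dbname": os.getenv("DB_NAME", "fleetmind"),
    "user": os.getenv("DB_USER", "postgres"),
    "password": os.getenv("DB_PASSWORD", ""),
}
```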
.github/workflows/check-file-size.yml
ADDED
@@ -0,0 +1,16 @@
name: Check file size
on:
  pull_request:
    branches: [main]

  # Allow manual trigger from Actions tab
  workflow_dispatch:

jobs:
  check-file-size:
    runs-on: ubuntu-latest
    steps:
      - name: Check large files
        uses: ActionsDesk/lfs-warning@v2.0
        with:
          filesizelimit: 10485760  # 10MB limit for HF Spaces sync
.github/workflows/sync-to-huggingface.yml
ADDED
@@ -0,0 +1,24 @@
name: Sync to Hugging Face Space
on:
  push:
    branches: [main]

  # Allow manual trigger from Actions tab
  workflow_dispatch:

jobs:
  sync-to-hub:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          lfs: true

      - name: Push to Hugging Face Space
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          git config --global user.email "github-actions[bot]@users.noreply.github.com"
          git config --global user.name "github-actions[bot]"
          git push https://MCP-1st-Birthday:$HF_TOKEN@huggingface.co/spaces/MCP-1st-Birthday/fleetmind-dispatch-ai main --force
.gitignore
ADDED
@@ -0,0 +1,59 @@
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# Virtual Environment
venv/
env/
ENV/

# Database
*.db
*.sqlite
*.sqlite3
data/*.db

# Environment Variables
.env

# IDE
.vscode/
.idea/
*.swp
*.swo
*~

# Logs
logs/
*.log

# OS
.DS_Store
Thumbs.db

# Testing
.pytest_cache/
.coverage
htmlcov/

# Gradio
gradio_cached_examples/
flagged/
DRIVER_CREATION_GUIDE.md
ADDED
@@ -0,0 +1,318 @@
# Driver Creation Feature Guide

## ✅ **Feature Status: READY**

The driver creation feature has been successfully implemented and tested!

---

## **How to Use**

### **Step 1: Restart Your Application**

Since we updated Gemini's system prompt and tools, restart the app:

```bash
# Stop the current app (Ctrl+C)
python ui/app.py
```

---

### **Step 2: Create Drivers Using Natural Language**

Open the chat at http://127.0.0.1:7860 and type naturally!

---

## **Example Commands**

### **Example 1: Complete Driver Info**

```
Add new driver Tom Wilson, phone 555-0101, drives a van, plate ABC-123
```

**Gemini will create:**
- Driver ID: DRV-20251114HHMMSS (auto-generated)
- Name: Tom Wilson
- Phone: 555-0101
- Vehicle: van
- Plate: ABC-123
- Status: active (default)
- Capacity: 1000 kg (default for van)

---

### **Example 2: Driver with Skills**

```
Create driver Sarah Martinez, phone 555-0202, refrigerated truck, plate XYZ-789,
skills: medical_certified, refrigerated
```

**Gemini will create:**
- Name: Sarah Martinez
- Phone: 555-0202
- Vehicle: truck
- Plate: XYZ-789
- Skills: ["medical_certified", "refrigerated"]
- Capacity: 1000 kg (default)

---

### **Example 3: Minimal Info (Name Only)**

```
Add driver Mike Chen
```

**Gemini will create:**
- Name: Mike Chen
- Vehicle: van (default)
- Capacity: 1000 kg (default)
- Status: active (default)
- Skills: [] (empty by default)

Gemini might ask: "Would you like to provide phone number or vehicle details?"

---

### **Example 4: Motorcycle Courier**

```
New driver: Lisa Anderson, phone 555-0303, motorcycle, express delivery specialist
```

**Gemini will create:**
- Name: Lisa Anderson
- Phone: 555-0303
- Vehicle: motorcycle
- Skills: ["express_delivery"]
- Capacity: 50 kg (you can specify)

---

## **Available Fields**

### **Required:**
- ✅ **name** - Driver's full name

### **Optional:**
- **phone** - Contact number (e.g., "+1-555-0101")
- **email** - Email address (e.g., "driver@fleet.com")
- **vehicle_type** - van | truck | car | motorcycle (default: van)
- **vehicle_plate** - License plate (e.g., "ABC-1234")
- **capacity_kg** - Cargo weight capacity in kg (default: 1000.0)
- **capacity_m3** - Cargo volume capacity in m³ (default: 12.0)
- **skills** - List of certifications/skills:
  - `refrigerated` - Can handle cold storage
  - `medical_certified` - Medical deliveries
  - `fragile_handler` - Fragile items expert
  - `overnight` - Overnight/late deliveries
  - `express_delivery` - Express/rush deliveries
- **status** - active | busy | offline | unavailable (default: active)

---

## **Testing**

### **Test 1: Verify Creation**

After creating a driver, check the database:

```bash
python verify_drivers.py
```

This will show all drivers including the newly created one.
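Note that `verify_drivers.py` is referenced here but is not part of this commit; a minimal sketch of what such a check could look like, assuming the `execute_query` helper from `database/connection.py` returns dict-like rows (hypothetical script, for illustration only):

```python
# verify_drivers.py - hypothetical sketch, not included in this commit
from database.connection import execute_query

def main():
    # List every driver currently stored in the drivers table
    rows = execute_query(
        "SELECT driver_id, name, vehicle_type, status FROM drivers ORDER BY driver_id"
    )
    print(f"Found {len(rows)} driver(s):")
    for row in rows:  # assumes dict-like rows; adapt if tuples are returned
        print(f"  {row['driver_id']} | {row['name']} | {row['vehicle_type']} | {row['status']}")

if __name__ == "__main__":
    main()
```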

---

### **Test 2: Check in UI**

Go to the **Orders** tab in the UI and you should see new drivers available for assignment.

---

## **What Happens Behind the Scenes**

### **User Input:**
```
"Add new driver Tom Wilson, phone 555-0101, drives a van, plate ABC-123"
```

### **Gemini Processing:**
1. **Parses** your natural language input
2. **Extracts** driver information:
   - name: "Tom Wilson"
   - phone: "555-0101"
   - vehicle_type: "van"
   - vehicle_plate: "ABC-123"
3. **Calls** `create_driver` tool with extracted data
4. **Database** inserts the driver with auto-generated ID
5. **Returns** confirmation message

### **Database Record Created:**
```sql
INSERT INTO drivers (
    driver_id,            -- DRV-20251114113250 (auto)
    name,                 -- Tom Wilson
    phone,                -- 555-0101
    vehicle_type,         -- van
    vehicle_plate,        -- ABC-123
    status,               -- active (default)
    capacity_kg,          -- 1000.0 (default)
    capacity_m3,          -- 12.0 (default)
    skills,               -- [] (empty)
    current_lat,          -- 37.7749 (default SF)
    current_lng,          -- -122.4194 (default SF)
    last_location_update  -- 2025-11-14 11:32:50
) VALUES (...)
```

---

## **Comparison: Orders vs Drivers**

### **Creating an Order:**
```
User: "Create order for John Doe, 123 Main St SF, phone 555-1234"
  ↓
Gemini: [geocode_address] → [create_order] → "✅ Order created!"
```
**2 tools called** (geocoding required for addresses)

### **Creating a Driver:**
```
User: "Add driver Tom Wilson, phone 555-0101, van"
  ↓
Gemini: [create_driver] → "✅ Driver created!"
```
**1 tool called** (no geocoding needed)

---

## **Quick Reference**

### **Conversational Examples:**

✅ "I need to onboard a new driver named Alex"
✅ "Add Sarah to the fleet, she drives a truck"
✅ "New driver: Mike, phone 555-9999, motorcycle"
✅ "Create driver with medical certification"
✅ "Add a refrigerated truck driver named Bob"

### **Structured Examples:**

✅ "Create driver: Name: Tom Wilson, Phone: 555-0101, Vehicle: van, Plate: ABC-123"
✅ "New driver - Name: Sarah, Email: sarah@fleet.com, Vehicle type: truck, Skills: refrigerated, medical_certified"

---

## **Sample Responses from Gemini**

### **Successful Creation:**
```
✅ Driver DRV-20251114113250 created successfully!

Driver Details:
• Driver ID: DRV-20251114113250
• Name: Tom Wilson
• Phone: 555-0101
• Vehicle: van (ABC-123)
• Capacity: 1000 kg
• Status: Active
• Skills: None

The driver has been added to your fleet and is ready for order assignments!
```

### **Missing Information:**
```
I can create a driver for you! I have:
• Name: Tom Wilson

To complete the driver profile, please provide (optional):
• Phone number
• Vehicle type (van/truck/car/motorcycle)
• License plate number
• Any special skills or certifications

Or I can create the driver now with default settings?
```

---

## **Technical Details**

### **Function:** `handle_create_driver()`
**Location:** `chat/tools.py:245-331`

### **Tool Definition:**
**Location:** `chat/providers/gemini_provider.py:140-186`

### **System Prompt:**
**Location:** `chat/providers/gemini_provider.py:32-89`
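For orientation, a rough sketch of the shape such a handler might take; the actual implementation is in `chat/tools.py:245-331` and will differ in detail. Everything below except the defaults documented in this guide is an assumption:

```python
# Hypothetical sketch of a create_driver tool handler (the real one lives in chat/tools.py)
from datetime import datetime
from database.connection import execute_write

def handle_create_driver(args: dict) -> dict:
    if not args.get("name"):
        return {"error": "Missing required field: name"}

    # Auto-generated ID in the DRV-<timestamp> format shown above
    driver_id = "DRV-" + datetime.now().strftime("%Y%m%d%H%M%S")
    execute_write(
        "INSERT INTO drivers (driver_id, name, phone, vehicle_type, vehicle_plate, "
        "status, capacity_kg, capacity_m3) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)",
        (
            driver_id,
            args["name"],
            args.get("phone"),
            args.get("vehicle_type", "van"),   # defaults documented in this guide
            args.get("vehicle_plate"),
            args.get("status", "active"),
            args.get("capacity_kg", 1000.0),
            args.get("capacity_m3", 12.0),
        ),
    )
    # skills (JSON), location defaults, and confirmation formatting omitted for brevity
    return {"driver_id": driver_id, "status": "created"}
```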

---

## **Next Steps**

After creating drivers, you can:

1. **Assign orders to drivers** (coming soon)
2. **View driver list** in the UI
3. **Update driver status** (active/busy/offline)
4. **Track driver locations** (coming soon)

---

## **Troubleshooting**

### **Issue: "Unknown tool: create_driver"**

**Solution:** Restart the application to reload the new tools:
```bash
# Stop app (Ctrl+C)
python ui/app.py
```

### **Issue: Driver created but not showing in database**

**Solution:** Check database connection and verify:
```bash
python verify_drivers.py
```

### **Issue: "Missing required field: name"**

**Solution:** Make sure you provide at least the driver's name:
```
"Add driver John Smith" ✅
"Add a new driver" ❌ (no name)
```

---

## **Feature Comparison**

| Feature | Orders | Drivers |
|---------|--------|---------|
| **Required Fields** | 3 (name, address, contact) | 1 (name) |
| **Geocoding Needed** | ✅ Yes | ❌ No |
| **Tools Called** | 2 (geocode + create) | 1 (create) |
| **Default Values** | Priority, weight | Vehicle type, capacity, status |
| **Complex Data** | Time windows, coordinates | Skills array, JSON |

---

## **Ready to Use!**

Your FleetMind system can now:
- ✅ Create customer orders
- ✅ Create delivery drivers
- ✅ Geocode addresses
- ✅ Store everything in PostgreSQL

Just talk naturally to Gemini and it handles the rest!
GITHUB_ACTIONS_SETUP.md
ADDED
@@ -0,0 +1,315 @@
# GitHub Actions Auto-Sync Setup Guide

This guide walks you through setting up automatic synchronization from your GitHub repository to your Hugging Face Space using GitHub Actions.

## ✅ What's Already Done

- [x] Created GitHub repository directory: `F:\github-fleetmind-team`
- [x] Copied all HF Space code to GitHub repo
- [x] Created GitHub Actions workflows:
  - `.github/workflows/sync-to-huggingface.yml` - Auto-sync on push
  - `.github/workflows/check-file-size.yml` - Check file sizes on PRs

## What You Need to Do Now

### STEP 3: Get Your Hugging Face Token

1. **Go to:** https://huggingface.co/settings/tokens

2. **Click:** "Create new token"

3. **Fill in:**
   - Name: `GitHub Actions Sync`
   - Type: **Write** (important!)
   - Scope: Select **all** or at least:
     - ✅ Write access to repos
     - ✅ Write access to spaces

4. **Click:** "Generate token"

5. **COPY THE TOKEN** (you won't see it again!)
   - It looks like: `hf_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx`

---

### STEP 4: Push Code to GitHub

Open PowerShell or Command Prompt:

```bash
# Navigate to the GitHub repo
cd F:\github-fleetmind-team

# Add all files
git add .

# Commit
git commit -m "Initial commit: FleetMind MCP with GitHub Actions sync"

# Add GitHub remote (REPLACE YOUR-USERNAME with your actual GitHub username)
git remote add origin https://github.com/YOUR-USERNAME/fleetmind-mcp.git

# Push to GitHub
git push -u origin main
```

**If you get an error about 'main' branch:**
```bash
# Rename branch to main
git branch -M main

# Push again
git push -u origin main
```

---

### STEP 5: Add HF_TOKEN as GitHub Secret

1. **Go to your GitHub repo:**
   - URL: `https://github.com/YOUR-USERNAME/fleetmind-mcp`

2. **Click:** Settings (top right of repo page)

3. **In left sidebar, click:**
   - Secrets and variables → Actions

4. **Click:** "New repository secret"

5. **Fill in:**
   - Name: `HF_TOKEN`
   - Secret: Paste the Hugging Face token you copied in Step 3

6. **Click:** "Add secret"

---

### STEP 6: Add HF Space as Git Remote (Optional but Recommended)

This allows you to manually push to HF Space if needed:

```bash
cd F:\github-fleetmind-team

# Add HF Space as a remote
git remote add space https://huggingface.co/spaces/MCP-1st-Birthday/fleetmind-dispatch-ai

# Verify remotes
git remote -v
```

You should see:
```
origin  https://github.com/YOUR-USERNAME/fleetmind-mcp.git (fetch)
origin  https://github.com/YOUR-USERNAME/fleetmind-mcp.git (push)
space   https://huggingface.co/spaces/MCP-1st-Birthday/fleetmind-dispatch-ai (fetch)
space   https://huggingface.co/spaces/MCP-1st-Birthday/fleetmind-dispatch-ai (push)
```

---

### STEP 7: Test the Auto-Sync

Let's make a test change to verify everything works:

```bash
cd F:\github-fleetmind-team

# Make a small change to README
echo "\n\n## Auto-Synced with GitHub Actions" >> README.md

# Commit the change
git add README.md
git commit -m "Test: GitHub Actions auto-sync"

# Push to GitHub
git push origin main
```

**What happens next:**

1. ✅ Code pushes to GitHub
2. ✅ GitHub Actions triggers automatically
3. ✅ Workflow runs and pushes to HF Space
4. ✅ HF Space rebuilds with new code

**Check the progress:**

1. **On GitHub:**
   - Go to: `https://github.com/YOUR-USERNAME/fleetmind-mcp/actions`
   - You'll see "Sync to Hugging Face Space" workflow running
   - Wait for green checkmark ✅

2. **On Hugging Face:**
   - Go to: https://huggingface.co/spaces/MCP-1st-Birthday/fleetmind-dispatch-ai
   - Check the "Files" tab - you should see the new commit
   - Space will rebuild automatically

---

## Success! Your Setup is Complete

From now on:

### **Team Members Workflow:**

```bash
# 1. Clone the GitHub repo (one time)
git clone https://github.com/YOUR-USERNAME/fleetmind-mcp.git
cd fleetmind-mcp

# 2. Make changes
# ... edit files ...

# 3. Commit and push to GitHub
git add .
git commit -m "Add new feature"
git push origin main

# 4. GitHub Actions automatically syncs to HF Space
# DONE! Nothing else needed!
```

### **What GitHub Actions Does:**

Every time someone pushes to the `main` branch on GitHub:

1. ✅ GitHub Actions workflow starts
2. ✅ Checks out the code
3. ✅ Pushes to HF Space using your HF_TOKEN
4. ✅ HF Space rebuilds automatically
5. ✅ Your app goes live with the new changes

---

## Adding Team Members

### On GitHub (Full Access):

1. Go to: `https://github.com/YOUR-USERNAME/fleetmind-mcp/settings/access`
2. Click "Add people"
3. Enter their GitHub username
4. Select role: **Write** (they can push directly)
5. Click "Add"

### On Hugging Face (Documentation Only):

Team members don't need HF access! The GitHub Actions bot handles all HF Space updates using your HF_TOKEN.

Just make sure they're listed in the README:
- Edit `README.md` lines 29-42
- Add their real names and HF usernames
- Commit and push

---

## Troubleshooting

### ❌ "Error: Process completed with exit code 128"

**Solution:** Check that HF_TOKEN is correctly set in GitHub Secrets
- Go to: `https://github.com/YOUR-USERNAME/fleetmind-mcp/settings/secrets/actions`
- Verify `HF_TOKEN` exists
- If not, add it (see Step 5)

### ❌ "Error: failed to push some refs"

**Solution:** HF Space has newer commits
```bash
# Pull from HF Space first
git pull space main --allow-unrelated-histories

# Then push again
git push origin main
```

### ❌ GitHub Actions workflow doesn't run

**Solution:** Check that workflow file is in correct location
- Must be: `.github/workflows/sync-to-huggingface.yml`
- Check GitHub repo → Actions tab
- Click "I understand my workflows, go ahead and enable them"

### ❌ Files larger than 10MB

**Solution:** Use Git LFS
```bash
# Install Git LFS
git lfs install

# Track large files
git lfs track "*.psd"  # Example: Photoshop files
git lfs track "*.pkl"  # Example: Model files

# Commit .gitattributes
git add .gitattributes
git commit -m "Add Git LFS tracking"
git push
```

---

## Advanced: Manual Sync

If you ever need to manually sync to HF Space:

```bash
cd F:\github-fleetmind-team

# Option 1: Push directly to HF Space remote
git push space main

# Option 2: Trigger GitHub Actions manually
# Go to: https://github.com/YOUR-USERNAME/fleetmind-mcp/actions
# Click "Sync to Hugging Face Space"
# Click "Run workflow" → "Run workflow"
```

---

## Workflow Diagram

```
Developer → GitHub Repo → GitHub Actions → HF Space → Live App
    ↓            ↓               ↓             ↓          ↓
  Codes       Receives        Triggers      Updates    Rebuilds
   Push         Event           Sync        with new   & Serves
                                              code
```

---

## Benefits of This Setup

✅ **Team Collaboration:** Everyone works on GitHub (familiar workflow)
✅ **Automatic Deployment:** Push to GitHub = Deploy to HF Space
✅ **No Permission Issues:** GitHub Actions uses your HF_TOKEN
✅ **Version Control:** Full history on GitHub
✅ **CI/CD Ready:** Can add tests, linting, etc.
✅ **Hackathon Compliant:** Final Space is on HF organization

---

## Next Steps

After setup is complete:

1. **Invite team members to GitHub repo**
2. **Update README with team information**
3. **Continue building your project**
4. **Create demo video (1-5 minutes)**
5. **Post on social media**
6. **Submit before November 30, 2025**

---

## Resources

- **GitHub Actions Docs:** https://docs.github.com/en/actions
- **HF Spaces Docs:** https://huggingface.co/docs/hub/spaces
- **Git LFS:** https://git-lfs.github.com/
- **Hackathon Info:** https://huggingface.co/MCP-1st-Birthday

---

**You're all set! Happy coding!**
QUICK_START.md
ADDED
@@ -0,0 +1,138 @@
# GitHub Actions Setup - Quick Start

## Complete These 4 Steps (15 minutes)

### STEP 1: Create GitHub Repository (2 min)

1. Go to: https://github.com/new
2. Name: `fleetmind-mcp`
3. Visibility: Public
4. **DON'T** initialize with README
5. Click "Create repository"
6. **SAVE YOUR USERNAME!**

---

### STEP 2: Get Hugging Face Token (2 min)

1. Go to: https://huggingface.co/settings/tokens
2. Click "Create new token"
3. Name: `GitHub Actions Sync`
4. Type: **Write**
5. Click "Generate token"
6. **COPY THE TOKEN** (starts with `hf_...`)

---

### STEP 3: Push to GitHub (3 min)

**Open PowerShell and run:**

```powershell
# Navigate to repo
cd F:\github-fleetmind-team

# Add and commit
git add .
git commit -m "Initial commit with GitHub Actions"

# Add GitHub remote (REPLACE YOUR-USERNAME!)
git remote add origin https://github.com/YOUR-USERNAME/fleetmind-mcp.git

# Push to GitHub
git branch -M main
git push -u origin main
```

**You'll be asked for credentials:**
- Username: Your GitHub username
- Password: Your GitHub Personal Access Token
  - Get it at: https://github.com/settings/tokens
  - Or use GitHub Desktop/CLI for easier auth

---

### STEP 4: Add HF_TOKEN to GitHub (3 min)

1. **Go to:** `https://github.com/YOUR-USERNAME/fleetmind-mcp/settings/secrets/actions`

2. **Click:** "New repository secret"

3. **Fill in:**
   - Name: `HF_TOKEN`
   - Secret: Paste your Hugging Face token from Step 2

4. **Click:** "Add secret"

---

## ✅ Test It Works (5 min)

```powershell
cd F:\github-fleetmind-team

# Make a small change
echo "`n## Auto-synced from GitHub" >> README.md

# Commit and push
git add README.md
git commit -m "Test: Auto-sync"
git push origin main
```

**Check:**
1. GitHub Actions: `https://github.com/YOUR-USERNAME/fleetmind-mcp/actions`
   - Should show green checkmark ✅
2. HF Space: https://huggingface.co/spaces/MCP-1st-Birthday/fleetmind-dispatch-ai
   - Should have new commit

---

## Done! Now Your Team Can Collaborate

**Team members:**

```bash
# Clone GitHub repo
git clone https://github.com/YOUR-USERNAME/fleetmind-mcp.git
cd fleetmind-mcp

# Make changes
# ... edit files ...

# Push to GitHub (auto-syncs to HF Space!)
git add .
git commit -m "Add feature"
git push
```

**Every push to GitHub automatically updates HF Space!**

---

## Add Team Members to GitHub

1. Go to: `https://github.com/YOUR-USERNAME/fleetmind-mcp/settings/access`
2. Click "Add people"
3. Enter their GitHub username
4. Role: **Write**
5. Click "Add"

---

## Quick Troubleshooting

**GitHub Actions fails?**
→ Check HF_TOKEN is set correctly in GitHub Secrets

**Can't push to GitHub?**
→ Create GitHub Personal Access Token: https://github.com/settings/tokens

**Files too large?**
→ Use Git LFS (see full guide)

---

**Full documentation:** See `GITHUB_ACTIONS_SETUP.md`

**Ready to build!**
README.md
ADDED
@@ -0,0 +1,195 @@
---
title: FleetMind AI Dispatch Coordinator
emoji: 🚚
colorFrom: blue
colorTo: purple
sdk: gradio
sdk_version: 5.9.0
app_file: app.py
pinned: false
tags:
- mcp
- mcp-in-action-track-01
- model-context-protocol
- multi-agent
- autonomous-ai
- gemini-2.0-flash
- delivery-management
- postgresql
---

# FleetMind MCP - Autonomous Dispatch Coordinator

**MCP 1st Birthday Hackathon Submission - Track: MCP in Action**

An autonomous AI coordinator that handles delivery exceptions using multi-agent orchestration powered by Google Gemini 2.0 Flash and the Model Context Protocol (MCP).

---

## Team

**Team Name:** [Your Team Name]

**Team Members:**
- **[Your Name]** - [@your-hf-username](https://huggingface.co/your-hf-username) - Lead Developer & Repository Manager
- **[Partner 2 Name]** - [@partner2-username](https://huggingface.co/partner2-username) - [Role - e.g., Backend Developer, Testing]
- **[Partner 3 Name]** - [@partner3-username](https://huggingface.co/partner3-username) - [Role - e.g., Documentation, Video]
- **[Partner 4 Name]** - [@partner4-username](https://huggingface.co/partner4-username) - [Role - e.g., UI/UX Designer]
- **[Partner 5 Name]** - [@partner5-username](https://huggingface.co/partner5-username) - [Role - e.g., Project Manager]

**Collaboration:** Team collaborates via [GitHub repository / Pull Requests / Task Division] with contributions managed by the lead developer.

*(Note: Replace placeholders with actual team member information. All members must have Hugging Face accounts and be listed here for valid hackathon submission.)*

---

## Quick Start

### 1. Install PostgreSQL

**Windows:**
- Download from https://www.postgresql.org/download/windows/
- Install with default settings
- Remember your postgres password

**macOS:**
```bash
brew install postgresql
brew services start postgresql
```

**Linux:**
```bash
sudo apt-get install postgresql postgresql-contrib
sudo systemctl start postgresql
```

### 2. Create Database

```bash
# Login to PostgreSQL
psql -U postgres

# Create the database
CREATE DATABASE fleetmind;

# Exit
\q
```

### 3. Set Up Environment

```bash
# Install Python dependencies
pip install -r requirements.txt

# Copy environment template
cp .env.example .env

# Edit .env with your database credentials
# DB_HOST=localhost
# DB_PORT=5432
# DB_NAME=fleetmind
# DB_USER=postgres
# DB_PASSWORD=your_password_here
```

### 4. Initialize Database Schema

```bash
# Run database initialization script
python scripts/init_db.py
```

This will create all necessary tables in the PostgreSQL database.

### 5. Run Application

```bash
# Start the Gradio UI (coming soon)
python ui/app.py
```

## Project Structure

```
fleetmind-mcp/
├── database/              # Database connection and schema
│   ├── __init__.py
│   ├── connection.py      # Database connection utilities
│   └── schema.py          # Database schema definitions
├── data/                  # Database and data files
│   └── fleetmind.db       # SQLite database (auto-generated)
├── mcp_server/            # MCP server implementation
├── agents/                # Multi-agent system
├── workflows/             # Orchestration workflows
├── ui/                    # Gradio interface
├── tests/                 # Test suite
├── scripts/               # Utility scripts
│   └── init_db.py         # Database initialization
├── requirements.txt       # Python dependencies
├── .env.example           # Environment variables template
└── README.md              # This file
```

## Database Schema (PostgreSQL)

The system uses PostgreSQL with the following tables:

### Orders Table

The `orders` table stores all delivery order information:

| Column | Type | Description |
|--------|------|-------------|
| order_id | VARCHAR(50) | Primary key |
| customer_name | VARCHAR(255) | Customer name |
| customer_phone | VARCHAR(20) | Contact phone |
| customer_email | VARCHAR(255) | Contact email |
| delivery_address | TEXT | Delivery address |
| delivery_lat/lng | DECIMAL(10,8) | GPS coordinates |
| time_window_start/end | TIMESTAMP | Delivery time window |
| priority | VARCHAR(20) | standard/express/urgent |
| weight_kg | DECIMAL(10,2) | Package weight |
| status | VARCHAR(20) | pending/assigned/in_transit/delivered/failed/cancelled |
| assigned_driver_id | VARCHAR(50) | Assigned driver |
| created_at | TIMESTAMP | Creation timestamp |
| updated_at | TIMESTAMP | Auto-updated timestamp |
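For reference, a sketch of how the table above might be declared in `database/schema.py`; the column types follow the table above, while the DEFAULT values and the extra digit on `delivery_lng` (to cover the ±180 range) are illustrative assumptions — the authoritative definition lives in that module:

```python
# Illustrative only - see database/schema.py for the real definition
ORDERS_TABLE_SQL = """
CREATE TABLE IF NOT EXISTS orders (
    order_id            VARCHAR(50) PRIMARY KEY,
    customer_name       VARCHAR(255),
    customer_phone      VARCHAR(20),
    customer_email      VARCHAR(255),
    delivery_address    TEXT,
    delivery_lat        DECIMAL(10,8),
    delivery_lng        DECIMAL(11,8),
    time_window_start   TIMESTAMP,
    time_window_end     TIMESTAMP,
    priority            VARCHAR(20) DEFAULT 'standard',
    weight_kg           DECIMAL(10,2),
    status              VARCHAR(20) DEFAULT 'pending',
    assigned_driver_id  VARCHAR(50),
    created_at          TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at          TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
"""
```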

### Additional Tables

- **drivers** - Driver information and status
- **assignments** - Order-driver assignments with routing
- **exceptions** - Exception tracking and resolution
- **agent_decisions** - AI agent decision logging
- **metrics** - Performance metrics and analytics

## Development

### Database Operations

```python
from database.connection import get_db_connection, execute_query, execute_write

# Get all pending orders (note: PostgreSQL uses %s for parameters)
orders = execute_query("SELECT * FROM orders WHERE status = %s", ("pending",))

# Create new order
order_id = execute_write(
    "INSERT INTO orders (order_id, customer_name, delivery_address, status) VALUES (%s, %s, %s, %s)",
    ("ORD-001", "John Doe", "123 Main St", "pending")
)

# Test connection
from database.connection import test_connection
if test_connection():
    print("Database connected successfully!")
```

## License

MIT License

## Contributing

Contributions welcome! Please read the contributing guidelines first.
TEAM_COLLABORATION_GUIDE.md
ADDED
|
@@ -0,0 +1,305 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Team Collaboration Guide - MCP 1st Birthday Hackathon
|
| 2 |
+
|
| 3 |
+
## Overview
|
| 4 |
+
|
| 5 |
+
This guide explains how to add team partners to your FleetMind MCP hackathon submission on Hugging Face Spaces.
|
| 6 |
+
|
| 7 |
+
**Hackathon Details:**
|
| 8 |
+
- **Team Size:** 2-5 members allowed
|
| 9 |
+
- **Your Space:** https://huggingface.co/spaces/MCP-1st-Birthday/fleetmind-dispatch-ai
|
| 10 |
+
- **Submission Deadline:** November 30, 2025 (11:59 PM UTC)
|
| 11 |
+
- **Track:** MCP in Action (Track 01)
|
| 12 |
+
|
| 13 |
+
---
|
| 14 |
+
|
| 15 |
+
## Method 1: Add Team Members via README Documentation (EASIEST)
|
| 16 |
+
|
| 17 |
+
For hackathon submission purposes, you MUST document your team in the Space's README.md file.
|
| 18 |
+
|
| 19 |
+
### Steps:
|
| 20 |
+
|
| 21 |
+
1. **Edit the Team Section in README.md**
|
| 22 |
+
|
| 23 |
+
The Team section is already added to your README. Update it with your actual team information:
|
| 24 |
+
|
| 25 |
+
```markdown
|
| 26 |
+
## π₯ Team
|
| 27 |
+
|
| 28 |
+
**Team Name:** FleetMind AI Team
|
| 29 |
+
|
| 30 |
+
**Team Members:**
|
| 31 |
+
- **John Doe** - [@johndoe](https://huggingface.co/johndoe) - Lead Developer & AI Architect
|
| 32 |
+
- **Jane Smith** - [@janesmith](https://huggingface.co/janesmith) - Database Engineer
|
| 33 |
+
- **Alex Chen** - [@alexchen](https://huggingface.co/alexchen) - UI/UX Developer
|
| 34 |
+
```
|
| 35 |
+
|
| 36 |
+
2. **Replace Placeholders:**
|
| 37 |
+
- `[Your Team Name]` β Your actual team name
|
| 38 |
+
- `[Your Name]` β Team member's real name
|
| 39 |
+
- `@your-hf-username` β Their Hugging Face username
|
| 40 |
+
- `[Role]` β Their role in the project
|
| 41 |
+
|
| 42 |
+
3. **Commit and Push:**
|
| 43 |
+
|
| 44 |
+
```bash
|
| 45 |
+
cd F:\fleetmind-mcp\fleetmind-dispatch-ai
|
| 46 |
+
git add README.md
|
| 47 |
+
git commit -m "Update team member information"
|
| 48 |
+
git push
|
| 49 |
+
```
|
| 50 |
+
|
| 51 |
+
---
|
| 52 |
+
|
| 53 |
+
## Method 2: Grant Git Access to Team Partners (TECHNICAL COLLABORATION)
|
| 54 |
+
|
| 55 |
+
If your team partners need to push code directly to the Space, they need Git access.
|
| 56 |
+
|
| 57 |
+
### Option A: Via Organization Membership
|
| 58 |
+
|
| 59 |
+
Since your Space is owned by the **MCP-1st-Birthday organization**, team members can:
|
| 60 |
+
|
| 61 |
+
1. **Join the Organization:**
|
| 62 |
+
- Go to https://huggingface.co/MCP-1st-Birthday
|
| 63 |
+
- Click **"Request to join this org"** (top right)
|
| 64 |
+
- Fill out the registration form
|
| 65 |
+
- Wait for admin approval
|
| 66 |
+
|
| 67 |
+
2. **Verify Access:**
|
| 68 |
+
- Once approved, they'll automatically have access based on organization permissions
|
| 69 |
+
- Organization members with "write" or "contributor" roles can collaborate
|
| 70 |
+
|
| 71 |
+
### Option B: Direct Collaborator Access
|
| 72 |
+
|
| 73 |
+
If you have admin rights to your Space:
|
| 74 |
+
|
| 75 |
+
1. **Go to Space Settings:**
|
| 76 |
+
- Visit: https://huggingface.co/spaces/MCP-1st-Birthday/fleetmind-dispatch-ai/settings
|
| 77 |
+
- Look for "Collaborators" or "Access" section
|
| 78 |
+
|
| 79 |
+
2. **Add Collaborators by Username:**
|
| 80 |
+
- Enter their Hugging Face username
|
| 81 |
+
- Set their permission level (read/write/admin)
|
| 82 |
+
- Send invitation
|
| 83 |
+
|
| 84 |
+
---
|
| 85 |
+
|
| 86 |
+
## Method 3: Collaborate via Pull Requests (SAFEST)
|
| 87 |
+
|
| 88 |
+
Team members can contribute without direct write access using Pull Requests.
|
| 89 |
+
|
| 90 |
+
### Steps:
|
| 91 |
+
|
| 92 |
+
1. **Team Partner Forks/Duplicates the Space:**
|
| 93 |
+
- They go to your Space page
|
| 94 |
+
- Click the three dots (top right) β "Duplicate this Space"
|
| 95 |
+
- Make changes in their forked version
|
| 96 |
+
|
| 97 |
+
2. **Create Pull Request:**
|
| 98 |
+
- After making changes, they create a Pull Request
|
| 99 |
+
- You review and merge their changes
|
| 100 |
+
|
| 101 |
+
3. **Enable Pull Requests:**
|
| 102 |
+
- Go to Space Settings
|
| 103 |
+
- Ensure "Pull Requests" are enabled
|
| 104 |
+
|
| 105 |
+
---
|
| 106 |
+
|
| 107 |
+
## Method 4: Share Git Credentials (NOT RECOMMENDED)
|
| 108 |
+
|
| 109 |
+
While technically possible, sharing your Git credentials is NOT recommended for security reasons. Use Methods 1-3 instead.
|
| 110 |
+
|
| 111 |
+
---
|
| 112 |
+
|
| 113 |
+
## Technical Setup for Team Partners
|
| 114 |
+
|
| 115 |
+
Once your team partners have access, they need to set up their local environment:
|
| 116 |
+
|
| 117 |
+
### 1. Clone the Space
|
| 118 |
+
|
| 119 |
+
```bash
|
| 120 |
+
# Navigate to desired directory
|
| 121 |
+
cd F:\
|
| 122 |
+
|
| 123 |
+
# Clone the Space
|
| 124 |
+
git clone https://huggingface.co/spaces/MCP-1st-Birthday/fleetmind-dispatch-ai
|
| 125 |
+
|
| 126 |
+
# Enter directory
|
| 127 |
+
cd fleetmind-dispatch-ai
|
| 128 |
+
```
|
| 129 |
+
|
| 130 |
+
### 2. Authenticate with Hugging Face
|
| 131 |
+
|
| 132 |
+
They need a Hugging Face access token:
|
| 133 |
+
|
| 134 |
+
1. **Get Token:**
|
| 135 |
+
- Go to https://huggingface.co/settings/tokens
|
| 136 |
+
- Click "New token"
|
| 137 |
+
- Create a token with "write" permissions
|
| 138 |
+
|
| 139 |
+
2. **Login via CLI:**
|
| 140 |
+
```bash
|
| 141 |
+
# Install Hugging Face CLI
|
| 142 |
+
pip install huggingface_hub
|
| 143 |
+
|
| 144 |
+
# Login (they'll be prompted for token)
|
| 145 |
+
huggingface-cli login
|
| 146 |
+
```
|
| 147 |
+
|
| 148 |
+
3. **Or Configure Git Credentials:**
|
| 149 |
+
```bash
|
| 150 |
+
git config credential.helper store
|
| 151 |
+
```
|
| 152 |
+
|
| 153 |
+
When they push for the first time, Git will ask for:
|
| 154 |
+
- Username: Their HF username
|
| 155 |
+
- Password: Their HF access token (NOT their account password)
|
| 156 |
+
|
| 157 |
+
### 3. Make Changes and Push
|
| 158 |
+
|
| 159 |
+
```bash
|
| 160 |
+
# Make changes to files
|
| 161 |
+
# ...
|
| 162 |
+
|
| 163 |
+
# Stage changes
|
| 164 |
+
git add .
|
| 165 |
+
|
| 166 |
+
# Commit
|
| 167 |
+
git commit -m "Add feature X"
|
| 168 |
+
|
| 169 |
+
# Push to Space
|
| 170 |
+
git push
|
| 171 |
+
```
|
| 172 |
+
|
| 173 |
+
---
|
| 174 |
+
|
| 175 |
+
## Hugging Face Spaces Access Levels
|
| 176 |
+
|
| 177 |
+
Understanding permission levels helps you decide what access to grant:
|
| 178 |
+
|
| 179 |
+
| Role | Can View | Can Clone | Can Push | Can Manage Settings |
|
| 180 |
+
|------|----------|-----------|----------|---------------------|
|
| 181 |
+
| **Public** | β
| β
| β | β |
|
| 182 |
+
| **Read** | β
| β
| β | β |
|
| 183 |
+
| **Contributor** | β
| β
| Via PR only | β |
|
| 184 |
+
| **Write** | β
| β
| β
| β |
|
| 185 |
+
| **Admin** | β
| β
| β
| β
|
|
| 186 |
+
|
| 187 |
+
---
|
| 188 |
+
|
| 189 |
+
## Hackathon Submission Requirements
|
| 190 |
+
|
| 191 |
+
For your team submission to be valid, ensure:
|
| 192 |
+
|
| 193 |
+
### Required in README.md:
|
| 194 |
+
- β
**Team section** with all member names and HF usernames
|
| 195 |
+
- β
**Track tag:** `mcp-in-action-track-01` (already added)
|
| 196 |
+
- β
**Demo video link** (1-5 minutes) - TODO
|
| 197 |
+
- β
**Social media post link** - TODO
|
| 198 |
+
|
| 199 |
+
### Required in Space:
|
| 200 |
+
- β
Published as a Space in MCP-1st-Birthday organization
|
| 201 |
+
- β
App.py entry point (already created)
|
| 202 |
+
- β
Working Gradio interface
|
| 203 |
+
- β
All code created during hackathon period (Nov 14-30, 2025)
|
| 204 |
+
|
| 205 |
+
---
|
| 206 |
+
|
| 207 |
+
## Troubleshooting
|
| 208 |
+
|
| 209 |
+
### "Permission denied" when team partner tries to push
|
| 210 |
+
|
| 211 |
+
**Solution:**
|
| 212 |
+
1. Verify they're added as collaborators with write access
|
| 213 |
+
2. Check they're using the correct HF access token (not account password)
|
| 214 |
+
3. Ensure token has "write" permissions
|
| 215 |
+
|
| 216 |
+
### "Repository not found" error
|
| 217 |
+
|
| 218 |
+
**Solution:**
|
| 219 |
+
1. Verify the Space URL is correct
|
| 220 |
+
2. Check they have at least "read" access
|
| 221 |
+
3. Ensure they're logged in: `huggingface-cli whoami`
|
| 222 |
+
|
| 223 |
+
### Team member can't see the Space
|
| 224 |
+
|
| 225 |
+
**Solution:**
|
| 226 |
+
1. If Space is private, add them as collaborators
|
| 227 |
+
2. If Space is public (recommended for hackathon), they should see it
|
| 228 |
+
3. Check organization membership status
|
| 229 |
+
|
| 230 |
+
---
|
| 231 |
+
|
| 232 |
+
## Best Practices for Team Collaboration
|
| 233 |
+
|
| 234 |
+
1. **Communication:**
|
| 235 |
+
- Use Discord channel: agents-mcp-hackathon-winter25
|
| 236 |
+
- Create a team group chat
|
| 237 |
+
- Document decisions in README
|
| 238 |
+
|
| 239 |
+
2. **Code Management:**
|
| 240 |
+
- Pull before making changes: `git pull`
|
| 241 |
+
- Commit frequently with clear messages
|
| 242 |
+
- Test locally before pushing
|
| 243 |
+
|
| 244 |
+
3. **Task Distribution:**
|
| 245 |
+
- Assign specific files/features to team members
|
| 246 |
+
- Avoid editing the same files simultaneously
|
| 247 |
+
- Use TODO comments in code
|
| 248 |
+
|
| 249 |
+
4. **Version Control:**
|
| 250 |
+
- Create branches for major features (optional)
|
| 251 |
+
- Use descriptive commit messages
|
| 252 |
+
- Review each other's code
|
| 253 |
+
|
| 254 |
+
---

## Quick Reference Commands

```bash
# Clone the Space
git clone https://huggingface.co/spaces/MCP-1st-Birthday/fleetmind-dispatch-ai

# Check current status
git status

# Pull latest changes
git pull

# Add all changes
git add .

# Commit with message
git commit -m "Description of changes"

# Push to Space
git push

# Check who you're logged in as
huggingface-cli whoami

# Login to HF
huggingface-cli login
```
---

## Support & Resources

- **Hackathon Discord:** agents-mcp-hackathon-winter25 channel
- **Office Hours:** November 17-28 with the Gradio team
- **HF Documentation:** https://huggingface.co/docs/hub/spaces
- **Git Documentation:** https://git-scm.com/doc

---

## Timeline Reminder

- **Start Date:** November 14, 2025
- **Submission Deadline:** November 30, 2025 (11:59 PM UTC)
- **Days Remaining:** Check the dashboard regularly

Make sure all team members are added to README.md before the deadline!

---

**Good luck with your hackathon submission! 🚀**
app.py
ADDED
@@ -0,0 +1,27 @@
"""
FleetMind MCP - Hugging Face Spaces Entry Point
This is the main entry point for the HF Space deployment
"""

import sys
from pathlib import Path

# Add current directory to path
sys.path.insert(0, str(Path(__file__).parent))

# Import and launch the Gradio app
from ui.app import create_interface

if __name__ == "__main__":
    print("=" * 60)
    print("FleetMind MCP - Starting on Hugging Face Spaces")
    print("=" * 60)

    # Create and launch the interface
    app = create_interface()
    app.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        show_error=True
    )
chat/__init__.py
ADDED
@@ -0,0 +1,9 @@
"""
Chat package for FleetMind MCP
Handles AI-powered natural language order creation
"""

from .chat_engine import ChatEngine
from .conversation import ConversationManager

__all__ = ['ChatEngine', 'ConversationManager']
chat/chat_engine.py
ADDED
@@ -0,0 +1,108 @@
"""
Chat engine for FleetMind
Main orchestrator for AI-powered conversations with multi-provider support
"""

import os
import logging
from typing import Tuple, List, Dict

from chat.providers import ClaudeProvider, GeminiProvider

logger = logging.getLogger(__name__)


class ChatEngine:
    """Main orchestrator for AI chat conversations with multi-provider support"""

    def __init__(self):
        # Get provider selection from environment
        provider_name = os.getenv("AI_PROVIDER", "anthropic").lower()

        logger.info(f"ChatEngine: Selected provider: {provider_name}")

        # Initialize the selected provider
        if provider_name == "gemini":
            self.provider = GeminiProvider()
            logger.info("ChatEngine: Using Gemini provider")
        elif provider_name == "anthropic":
            self.provider = ClaudeProvider()
            logger.info("ChatEngine: Using Claude provider")
        else:
            # Default to Claude if unknown provider
            logger.warning(f"ChatEngine: Unknown provider '{provider_name}', defaulting to Claude")
            self.provider = ClaudeProvider()

        # Store provider name for UI
        self.selected_provider = provider_name

    def is_available(self) -> bool:
        """Check if the chat engine is available"""
        return self.provider.is_available()

    def get_status(self) -> str:
        """Get status message for UI"""
        provider_status = self.provider.get_status()
        provider_name = self.provider.get_provider_name()

        return f"**{provider_name}:** {provider_status}"

    def get_provider_name(self) -> str:
        """Get the active provider name"""
        return self.provider.get_provider_name()

    def get_model_name(self) -> str:
        """Get the active model name"""
        return self.provider.get_model_name()

    def process_message(
        self,
        user_message: str,
        conversation
    ) -> Tuple[str, List[Dict]]:
        """
        Process user message and return AI response

        Args:
            user_message: User's message
            conversation: ConversationManager instance

        Returns:
            Tuple of (assistant_response, tool_calls_made)
        """
        return self.provider.process_message(user_message, conversation)

    def get_welcome_message(self) -> str:
        """Get welcome message for new conversations"""
        return self.provider.get_welcome_message()

    def get_full_status(self) -> Dict[str, str]:
        """
        Get detailed status for all providers

        Returns:
            Dict with status for each provider
        """
        # Get status without creating new instances (avoid API calls)
        claude_key = os.getenv("ANTHROPIC_API_KEY", "")
        gemini_key = os.getenv("GOOGLE_API_KEY", "")

        claude_available = bool(claude_key and not claude_key.startswith("your_"))
        gemini_available = bool(gemini_key and not gemini_key.startswith("your_"))

        claude_status = "✅ Connected - Model: claude-3-5-sonnet-20241022" if claude_available else "⚠️ Not configured (add ANTHROPIC_API_KEY)"
        gemini_status = f"✅ Connected - Model: {self.provider.get_model_name()}" if (self.selected_provider == "gemini" and gemini_available) else "⚠️ Not configured (add GOOGLE_API_KEY)" if not gemini_available else "✅ Configured"

        return {
            "selected": self.selected_provider,
            "claude": {
                "name": "Claude (Anthropic)",
                "status": claude_status,
                "available": claude_available
            },
            "gemini": {
                "name": "Gemini (Google)",
                "status": gemini_status,
                "available": gemini_available
            }
        }
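A minimal usage sketch of the engine above (not part of the committed files; it assumes the keys from `.env.example` are configured and the database is reachable):

```python
from chat import ChatEngine, ConversationManager

engine = ChatEngine()                  # picks Claude or Gemini based on AI_PROVIDER
conversation = ConversationManager()

if engine.is_available():
    reply, tool_calls = engine.process_message(
        "Create order for John Doe, 123 Main St San Francisco CA, phone 555-1234",
        conversation
    )
    print(reply)                       # assistant confirmation text
    print(tool_calls)                  # e.g. geocode_address + create_order calls
else:
    print(engine.get_status())         # explains which API key is missing
```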
chat/conversation.py
ADDED
@@ -0,0 +1,85 @@
"""
Conversation manager for FleetMind chat
Handles conversation state and history
"""

import logging
from typing import List, Dict

logger = logging.getLogger(__name__)


class ConversationManager:
    """Manage conversation state and history"""

    def __init__(self):
        self.history = []
        self.tool_calls = []  # Track all tool calls for transparency
        self.order_context = {}  # Accumulated order details

    def add_message(self, role: str, content: str):
        """
        Add message to conversation history

        Args:
            role: "user" or "assistant"
            content: Message content
        """
        self.history.append({
            "role": role,
            "content": content
        })

    def add_tool_result(self, tool_name: str, tool_input: dict, tool_result: dict):
        """
        Track tool usage for transparency

        Args:
            tool_name: Name of the tool called
            tool_input: Input parameters
            tool_result: Result from tool execution
        """
        self.tool_calls.append({
            "tool": tool_name,
            "input": tool_input,
            "result": tool_result
        })

    def get_history(self) -> List[Dict]:
        """Get full conversation history"""
        return self.history

    def get_tool_calls(self) -> List[Dict]:
        """Get all tool calls made in this conversation"""
        return self.tool_calls

    def get_last_tool_call(self) -> Dict:
        """Get the most recent tool call"""
        if self.tool_calls:
            return self.tool_calls[-1]
        return {}

    def clear_tool_calls(self):
        """Clear tool call history"""
        self.tool_calls = []

    def reset(self):
        """Start a new conversation"""
        self.history = []
        self.tool_calls = []
        self.order_context = {}
        logger.info("Conversation reset")

    def get_message_count(self) -> int:
        """Get number of messages in conversation"""
        return len(self.history)

    def get_formatted_history(self) -> List[Dict]:
        """
        Get history formatted for Gradio chatbot (messages format)

        Returns:
            List of message dictionaries with 'role' and 'content' keys
        """
        # For Gradio type="messages", return list of dicts with role/content
        return self.history
chat/geocoding.py
ADDED
@@ -0,0 +1,131 @@
"""
Geocoding service for FleetMind
Handles address validation with HERE API and smart mock fallback
"""

import os
import requests
import logging
from typing import Dict, Optional

logger = logging.getLogger(__name__)

# Common city coordinates for mock geocoding
CITY_COORDINATES = {
    "san francisco": (37.7749, -122.4194),
    "sf": (37.7749, -122.4194),
    "new york": (40.7128, -74.0060),
    "nyc": (40.7128, -74.0060),
    "los angeles": (34.0522, -118.2437),
    "la": (34.0522, -118.2437),
    "chicago": (41.8781, -87.6298),
    "houston": (29.7604, -95.3698),
    "phoenix": (33.4484, -112.0740),
    "philadelphia": (39.9526, -75.1652),
    "san antonio": (29.4241, -98.4936),
    "san diego": (32.7157, -117.1611),
    "dallas": (32.7767, -96.7970),
    "austin": (30.2672, -97.7431),
    "seattle": (47.6062, -122.3321),
    "boston": (42.3601, -71.0589),
    "denver": (39.7392, -104.9903),
    "miami": (25.7617, -80.1918),
    "atlanta": (33.7490, -84.3880),
    "portland": (45.5152, -122.6784),
}


class GeocodingService:
    """Handle address geocoding with HERE API and smart mock fallback"""

    def __init__(self):
        self.here_api_key = os.getenv("HERE_API_KEY", "")
        self.use_mock = not self.here_api_key or self.here_api_key.startswith("your_")

        if self.use_mock:
            logger.info("Geocoding: Using mock (HERE_API_KEY not configured)")
        else:
            logger.info("Geocoding: Using HERE Maps API")

    def geocode(self, address: str) -> Dict:
        """
        Geocode address, using mock if API unavailable

        Args:
            address: Street address to geocode

        Returns:
            Dict with keys: lat, lng, formatted_address, confidence
        """
        if self.use_mock:
            return self._geocode_mock(address)
        else:
            try:
                return self._geocode_here(address)
            except Exception as e:
                logger.error(f"HERE API failed: {e}, falling back to mock")
                return self._geocode_mock(address)

    def _geocode_here(self, address: str) -> Dict:
        """Real HERE API geocoding"""
        url = "https://geocode.search.hereapi.com/v1/geocode"

        params = {
            "q": address,
            "apiKey": self.here_api_key
        }

        response = requests.get(url, params=params, timeout=10)
        response.raise_for_status()

        data = response.json()

        if not data.get("items"):
            # No results found, fall back to mock
            logger.warning(f"HERE API found no results for: {address}")
            return self._geocode_mock(address)

        # Get first result
        item = data["items"][0]
        position = item["position"]

        return {
            "lat": position["lat"],
            "lng": position["lng"],
            "formatted_address": item.get("address", {}).get("label", address),
            "confidence": "high (HERE API)"
        }

    def _geocode_mock(self, address: str) -> Dict:
        """
        Smart mock geocoding for testing
        Tries to detect city name and use approximate coordinates
        """
        address_lower = address.lower()

        # Try to find a city match
        for city, coords in CITY_COORDINATES.items():
            if city in address_lower:
                logger.info(f"Mock geocoding detected city: {city}")
                return {
                    "lat": coords[0],
                    "lng": coords[1],
                    "formatted_address": address,
                    "confidence": f"medium (mock - {city})"
                }

        # Default to San Francisco if no city detected
        logger.info("Mock geocoding: Using default SF coordinates")
        return {
            "lat": 37.7749,
            "lng": -122.4194,
            "formatted_address": address,
            "confidence": "low (mock - default)"
        }

    def get_status(self) -> str:
        """Get geocoding service status"""
        if self.use_mock:
            return "⚠️ Using mock geocoding (add HERE_API_KEY for real)"
        else:
            return "✅ HERE Maps API connected"
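A small usage sketch for the geocoder (not part of the committed files). Without a `HERE_API_KEY` it falls back to the city table above, so this runs offline:

```python
from chat.geocoding import GeocodingService

geo = GeocodingService()
print(geo.get_status())

result = geo.geocode("456 Oak Ave, Seattle, WA")
# With no HERE key this returns the mock Seattle coordinates
print(result["lat"], result["lng"], result["confidence"])
```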
chat/providers/__init__.py
ADDED
@@ -0,0 +1,10 @@
"""
AI Provider implementations for FleetMind chat
Supports multiple AI providers (Anthropic Claude, Google Gemini)
"""

from .base_provider import AIProvider
from .claude_provider import ClaudeProvider
from .gemini_provider import GeminiProvider

__all__ = ['AIProvider', 'ClaudeProvider', 'GeminiProvider']
chat/providers/base_provider.py
ADDED
@@ -0,0 +1,53 @@
"""
Base provider interface for AI providers
"""

from abc import ABC, abstractmethod
from typing import Tuple, List, Dict


class AIProvider(ABC):
    """Abstract base class for AI providers"""

    @abstractmethod
    def is_available(self) -> bool:
        """Check if the provider is available (API key configured)"""
        pass

    @abstractmethod
    def get_status(self) -> str:
        """Get status message for UI"""
        pass

    @abstractmethod
    def get_provider_name(self) -> str:
        """Get provider name (e.g., 'Claude', 'Gemini')"""
        pass

    @abstractmethod
    def get_model_name(self) -> str:
        """Get model name (e.g., 'claude-3-5-sonnet-20241022')"""
        pass

    @abstractmethod
    def process_message(
        self,
        user_message: str,
        conversation
    ) -> Tuple[str, List[Dict]]:
        """
        Process user message and return AI response

        Args:
            user_message: User's message
            conversation: ConversationManager instance

        Returns:
            Tuple of (assistant_response, tool_calls_made)
        """
        pass

    @abstractmethod
    def get_welcome_message(self) -> str:
        """Get welcome message for new conversations"""
        pass
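A quick sketch of how a third provider would plug into this interface; the `EchoProvider` below is hypothetical and not part of the commit:

```python
from typing import Tuple, List, Dict

from chat.providers.base_provider import AIProvider


class EchoProvider(AIProvider):
    """Hypothetical provider that just echoes the user, useful for offline testing."""

    def is_available(self) -> bool:
        return True

    def get_status(self) -> str:
        return "✅ Connected - Model: echo"

    def get_provider_name(self) -> str:
        return "Echo (local)"

    def get_model_name(self) -> str:
        return "echo"

    def process_message(self, user_message: str, conversation) -> Tuple[str, List[Dict]]:
        # Record the exchange exactly like the real providers do
        conversation.add_message("user", user_message)
        reply = f"You said: {user_message}"
        conversation.add_message("assistant", reply)
        return reply, []

    def get_welcome_message(self) -> str:
        return "👋 Echo provider ready."
```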
chat/providers/claude_provider.py
ADDED
@@ -0,0 +1,271 @@
"""
Anthropic Claude provider for FleetMind chat
"""

import os
import logging
from typing import Tuple, List, Dict
from anthropic import Anthropic, APIError, APIConnectionError, AuthenticationError

from chat.providers.base_provider import AIProvider
from chat.tools import TOOLS_SCHEMA, execute_tool

logger = logging.getLogger(__name__)


class ClaudeProvider(AIProvider):
    """Anthropic Claude AI provider"""

    def __init__(self):
        self.api_key = os.getenv("ANTHROPIC_API_KEY", "")
        self.api_available = bool(self.api_key and not self.api_key.startswith("your_"))

        if self.api_available:
            try:
                self.client = Anthropic(api_key=self.api_key)
                logger.info("ClaudeProvider: Initialized successfully")
            except Exception as e:
                logger.error(f"ClaudeProvider: Failed to initialize: {e}")
                self.api_available = False
        else:
            self.client = None
            logger.warning("ClaudeProvider: ANTHROPIC_API_KEY not configured")

        self.model = "claude-3-5-sonnet-20241022"
        self.system_prompt = self._get_system_prompt()

    def _get_system_prompt(self) -> str:
        """Get the system prompt for Claude"""
        return """You are an AI assistant for FleetMind, a delivery dispatch system. Your job is to help coordinators create delivery orders efficiently.

**IMPORTANT: When a user wants to create an order, FIRST show them this order form:**

📋 **Order Information Form**
Please provide the following details:

**Required Fields:**
• Customer Name: [Full name]
• Delivery Address: [Street address, city, state, zip]
• Contact: [Phone number OR email address]

**Optional Fields:**
• Delivery Deadline: [Date/time, or "ASAP" - default: 6 hours from now]
• Priority: [standard/express/urgent - default: standard]
• Special Instructions: [Any special notes]
• Package Weight: [In kg - default: 5.0 kg]

**Example:**
"Customer: John Doe, Address: 123 Main St, San Francisco, CA 94103, Phone: 555-1234, Deliver by 5 PM today"

---

**Your Workflow:**
1. **If user says "create order" or similar:** Show the form above and ask them to provide the information
2. **If they provide all/most info:** Proceed immediately with geocoding and order creation
3. **If information is missing:** Show what's missing from the form and ask for those specific fields
4. **After collecting required fields:**
   - Use `geocode_address` tool to validate the address
   - Use `create_order` tool to save the order
   - Provide a clear confirmation with order ID

**Important Rules:**
- ALWAYS geocode the address BEFORE creating an order
- Be efficient - don't ask questions one at a time
- Accept information in any format (natural language, bullet points, etc.)
- Keep responses concise and professional
- Show enthusiasm when orders are successfully created

Remember: Dispatch coordinators are busy - help them create orders FAST!"""

    def is_available(self) -> bool:
        return self.api_available

    def get_status(self) -> str:
        if self.api_available:
            return f"✅ Connected - Model: {self.model}"
        return "⚠️ Not configured (add ANTHROPIC_API_KEY)"

    def get_provider_name(self) -> str:
        return "Claude (Anthropic)"

    def get_model_name(self) -> str:
        return self.model

    def process_message(
        self,
        user_message: str,
        conversation
    ) -> Tuple[str, List[Dict]]:
        """Process user message with Claude"""
        if not self.api_available:
            return self._handle_no_api(), []

        # Add user message to history
        conversation.add_message("user", user_message)

        try:
            # Make API call to Claude
            response = self.client.messages.create(
                model=self.model,
                max_tokens=4096,
                system=self.system_prompt,
                tools=TOOLS_SCHEMA,
                messages=conversation.get_history()
            )

            # Process response and handle tool calls
            return self._process_response(response, conversation)

        except AuthenticationError:
            error_msg = "⚠️ Invalid API key. Please check your ANTHROPIC_API_KEY in .env file."
            logger.error("Authentication error with Anthropic API")
            return error_msg, []

        except APIConnectionError:
            error_msg = "⚠️ Cannot connect to Anthropic API. Please check your internet connection."
            logger.error("Connection error with Anthropic API")
            return error_msg, []

        except APIError as e:
            error_msg = f"⚠️ API error: {str(e)}"
            logger.error(f"Anthropic API error: {e}")
            return error_msg, []

        except Exception as e:
            error_msg = f"⚠️ Unexpected error: {str(e)}"
            logger.error(f"Claude provider error: {e}")
            return error_msg, []

    def _process_response(
        self,
        response,
        conversation
    ) -> Tuple[str, List[Dict]]:
        """Process Claude's response and handle tool calls"""
        tool_calls_made = []

        # Check if Claude wants to use tools
        if response.stop_reason == "tool_use":
            # Execute tools
            tool_results = []

            for content_block in response.content:
                if content_block.type == "tool_use":
                    tool_name = content_block.name
                    tool_input = content_block.input

                    logger.info(f"Claude executing tool: {tool_name}")

                    # Execute the tool
                    tool_result = execute_tool(tool_name, tool_input)

                    # Track for transparency
                    tool_calls_made.append({
                        "tool": tool_name,
                        "input": tool_input,
                        "result": tool_result
                    })

                    conversation.add_tool_result(tool_name, tool_input, tool_result)

                    # Prepare result for Claude
                    tool_results.append({
                        "type": "tool_result",
                        "tool_use_id": content_block.id,
                        "content": str(tool_result)
                    })

            # Add assistant's tool use to history
            conversation.add_message("assistant", response.content)

            # Add tool results to history
            conversation.add_message("user", tool_results)

            # Continue conversation with tool results
            followup_response = self.client.messages.create(
                model=self.model,
                max_tokens=4096,
                system=self.system_prompt,
                tools=TOOLS_SCHEMA,
                messages=conversation.get_history()
            )

            # Extract final text response
            final_text = self._extract_text_response(followup_response)
            conversation.add_message("assistant", final_text)

            return final_text, tool_calls_made

        else:
            # No tool use, just text response
            text_response = self._extract_text_response(response)
            conversation.add_message("assistant", text_response)
            return text_response, tool_calls_made

    def _extract_text_response(self, response) -> str:
        """Extract text content from Claude's response"""
        text_parts = []
        for block in response.content:
            if hasattr(block, 'text'):
                text_parts.append(block.text)
            elif block.type == "text":
                text_parts.append(block.text if hasattr(block, 'text') else str(block))

        return "\n".join(text_parts) if text_parts else "I apologize, but I couldn't generate a response."

    def _handle_no_api(self) -> str:
        """Return error message when API is not available"""
        return """⚠️ **Claude API requires Anthropic API key**

To use Claude:

1. Get an API key from: https://console.anthropic.com/
   - Sign up for free ($5 credit available)
   - Or use hackathon credits

2. Add to your `.env` file:
   ```
   ANTHROPIC_API_KEY=sk-ant-your-key-here
   ```

3. Restart the application

**Alternative:** Switch to Gemini by setting `AI_PROVIDER=gemini` in .env
"""

    def get_welcome_message(self) -> str:
        if not self.api_available:
            return self._handle_no_api()

        return """👋 Hello! I'm your AI dispatch assistant powered by **Claude 3.5 Sonnet**.

I can help you create delivery orders quickly and efficiently!

---

📋 **To Create an Order, Provide:**

**Required:**
• Customer Name
• Delivery Address
• Contact (Phone OR Email)

**Optional:**
• Delivery Deadline (default: 6 hours)
• Priority: standard/express/urgent (default: standard)
• Special Instructions
• Package Weight in kg (default: 5.0)

---

**Quick Start Examples:**

✅ *Complete:* "Create order for John Doe, 123 Main St San Francisco CA, phone 555-1234, deliver by 5 PM"

✅ *Partial:* "I need a delivery for Sarah" *(I'll ask for missing details)*

✅ *Natural:* "Urgent package to john@email.com at 456 Market Street"

---

What would you like to do?"""
chat/providers/gemini_provider.py
ADDED
@@ -0,0 +1,551 @@
| 1 |
+
"""
|
| 2 |
+
Google Gemini provider for FleetMind chat
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import os
|
| 6 |
+
import logging
|
| 7 |
+
from typing import Tuple, List, Dict
|
| 8 |
+
import google.generativeai as genai
|
| 9 |
+
from google.generativeai.types import HarmCategory, HarmBlockThreshold
|
| 10 |
+
|
| 11 |
+
from chat.providers.base_provider import AIProvider
|
| 12 |
+
from chat.tools import execute_tool
|
| 13 |
+
|
| 14 |
+
logger = logging.getLogger(__name__)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class GeminiProvider(AIProvider):
|
| 18 |
+
"""Google Gemini AI provider"""
|
| 19 |
+
|
| 20 |
+
def __init__(self):
|
| 21 |
+
self.api_key = os.getenv("GOOGLE_API_KEY", "")
|
| 22 |
+
self.api_available = bool(self.api_key and not self.api_key.startswith("your_"))
|
| 23 |
+
self.model_name = "gemini-2.0-flash"
|
| 24 |
+
self.model = None
|
| 25 |
+
self._initialized = False
|
| 26 |
+
|
| 27 |
+
if not self.api_available:
|
| 28 |
+
logger.warning("GeminiProvider: GOOGLE_API_KEY not configured")
|
| 29 |
+
else:
|
| 30 |
+
logger.info("GeminiProvider: Ready (will initialize on first use)")
|
| 31 |
+
|
| 32 |
+
def _get_system_prompt(self) -> str:
|
| 33 |
+
"""Get the system prompt for Gemini"""
|
| 34 |
+
return """You are an AI assistant for FleetMind, a delivery dispatch system.
|
| 35 |
+
|
| 36 |
+
**π¨ CRITICAL RULES - READ CAREFULLY:**
|
| 37 |
+
|
| 38 |
+
1. **NEVER return text in the middle of tool calls**
|
| 39 |
+
- If you need to call multiple tools, call them ALL in sequence
|
| 40 |
+
- Only return text AFTER all tools are complete
|
| 41 |
+
|
| 42 |
+
2. **Order Creation MUST be a single automated flow:**
|
| 43 |
+
- Step 1: Call geocode_address (get coordinates)
|
| 44 |
+
- Step 2: IMMEDIATELY call create_order (save to database)
|
| 45 |
+
- Step 3: ONLY THEN return success message
|
| 46 |
+
- DO NOT stop between Step 1 and Step 2
|
| 47 |
+
- DO NOT say "Now creating order..." - just DO it!
|
| 48 |
+
|
| 49 |
+
3. **Driver Creation is a SINGLE tool call:**
|
| 50 |
+
- When user wants to create a driver, call create_driver immediately
|
| 51 |
+
- NO geocoding needed for drivers
|
| 52 |
+
- Just call create_driver β confirm
|
| 53 |
+
|
| 54 |
+
4. **If user provides required info, START IMMEDIATELY:**
|
| 55 |
+
- For Orders: Customer name, address, contact (phone OR email)
|
| 56 |
+
- For Drivers: Driver name (phone/email optional)
|
| 57 |
+
- If all present β execute β confirm
|
| 58 |
+
- If missing β ask ONCE for all missing fields
|
| 59 |
+
|
| 60 |
+
**Example of CORRECT behavior:**
|
| 61 |
+
|
| 62 |
+
ORDER:
|
| 63 |
+
User: "Create order for John Doe, 123 Main St SF, phone 555-1234"
|
| 64 |
+
You: [geocode_address] β [create_order] β "β
Order ORD-123 created!"
|
| 65 |
+
(ALL in one response, no intermediate text)
|
| 66 |
+
|
| 67 |
+
DRIVER:
|
| 68 |
+
User: "Add new driver Mike Johnson, phone 555-0101, drives a van"
|
| 69 |
+
You: [create_driver] β "β
Driver DRV-123 (Mike Johnson) added to fleet!"
|
| 70 |
+
(Single tool call, immediate response)
|
| 71 |
+
|
| 72 |
+
**Example of WRONG behavior (DO NOT DO THIS):**
|
| 73 |
+
User: "Create order for John Doe..."
|
| 74 |
+
You: [geocode_address] β "OK geocoded, now creating..." β WRONG!
|
| 75 |
+
|
| 76 |
+
**Available Tools:**
|
| 77 |
+
- geocode_address: Convert address to GPS coordinates
|
| 78 |
+
- create_order: Create customer delivery order (REQUIRES geocoded address)
|
| 79 |
+
- create_driver: Add new driver/delivery man to fleet
|
| 80 |
+
|
| 81 |
+
**Order Fields:**
|
| 82 |
+
Required: customer_name, delivery_address, contact
|
| 83 |
+
Optional: time_window_end, priority (standard/express/urgent), special_instructions, weight_kg
|
| 84 |
+
|
| 85 |
+
**Driver Fields:**
|
| 86 |
+
Required: name
|
| 87 |
+
Optional: phone, email, vehicle_type (van/truck/car/motorcycle), vehicle_plate, capacity_kg, capacity_m3, skills (list), status (active/busy/offline)
|
| 88 |
+
|
| 89 |
+
**Your goal:** Execute tasks in ONE smooth automated flow. No stopping, no intermediate messages!"""
|
| 90 |
+
|
| 91 |
+
def _get_gemini_tools(self) -> list:
|
| 92 |
+
"""Convert tool schemas to Gemini function calling format"""
|
| 93 |
+
# Gemini expects tools wrapped in function_declarations
|
| 94 |
+
return [
|
| 95 |
+
genai.protos.Tool(
|
| 96 |
+
function_declarations=[
|
| 97 |
+
genai.protos.FunctionDeclaration(
|
| 98 |
+
name="geocode_address",
|
| 99 |
+
description="Convert a delivery address to GPS coordinates and validate the address format. Use this before creating an order to ensure the address is valid.",
|
| 100 |
+
parameters=genai.protos.Schema(
|
| 101 |
+
type=genai.protos.Type.OBJECT,
|
| 102 |
+
properties={
|
| 103 |
+
"address": genai.protos.Schema(
|
| 104 |
+
type=genai.protos.Type.STRING,
|
| 105 |
+
description="The full delivery address to geocode (e.g., '123 Main St, San Francisco, CA')"
|
| 106 |
+
)
|
| 107 |
+
},
|
| 108 |
+
required=["address"]
|
| 109 |
+
)
|
| 110 |
+
),
|
| 111 |
+
genai.protos.FunctionDeclaration(
|
| 112 |
+
name="create_order",
|
| 113 |
+
description="Create a new delivery order in the database. Only call this after geocoding the address successfully.",
|
| 114 |
+
parameters=genai.protos.Schema(
|
| 115 |
+
type=genai.protos.Type.OBJECT,
|
| 116 |
+
properties={
|
| 117 |
+
"customer_name": genai.protos.Schema(
|
| 118 |
+
type=genai.protos.Type.STRING,
|
| 119 |
+
description="Full name of the customer"
|
| 120 |
+
),
|
| 121 |
+
"customer_phone": genai.protos.Schema(
|
| 122 |
+
type=genai.protos.Type.STRING,
|
| 123 |
+
description="Customer phone number (optional)"
|
| 124 |
+
),
|
| 125 |
+
"customer_email": genai.protos.Schema(
|
| 126 |
+
type=genai.protos.Type.STRING,
|
| 127 |
+
description="Customer email address (optional)"
|
| 128 |
+
),
|
| 129 |
+
"delivery_address": genai.protos.Schema(
|
| 130 |
+
type=genai.protos.Type.STRING,
|
| 131 |
+
description="Full delivery address"
|
| 132 |
+
),
|
| 133 |
+
"delivery_lat": genai.protos.Schema(
|
| 134 |
+
type=genai.protos.Type.NUMBER,
|
| 135 |
+
description="Latitude from geocoding"
|
| 136 |
+
),
|
| 137 |
+
"delivery_lng": genai.protos.Schema(
|
| 138 |
+
type=genai.protos.Type.NUMBER,
|
| 139 |
+
description="Longitude from geocoding"
|
| 140 |
+
),
|
| 141 |
+
"time_window_end": genai.protos.Schema(
|
| 142 |
+
type=genai.protos.Type.STRING,
|
| 143 |
+
description="Delivery deadline in ISO format (e.g., '2025-11-13T17:00:00'). If not specified by user, default to 6 hours from now."
|
| 144 |
+
),
|
| 145 |
+
"priority": genai.protos.Schema(
|
| 146 |
+
type=genai.protos.Type.STRING,
|
| 147 |
+
description="Delivery priority. Default to 'standard' unless user specifies urgent/express."
|
| 148 |
+
),
|
| 149 |
+
"special_instructions": genai.protos.Schema(
|
| 150 |
+
type=genai.protos.Type.STRING,
|
| 151 |
+
description="Any special delivery instructions (optional)"
|
| 152 |
+
),
|
| 153 |
+
"weight_kg": genai.protos.Schema(
|
| 154 |
+
type=genai.protos.Type.NUMBER,
|
| 155 |
+
description="Package weight in kilograms (optional, default to 5.0)"
|
| 156 |
+
)
|
| 157 |
+
},
|
| 158 |
+
required=["customer_name", "delivery_address", "delivery_lat", "delivery_lng"]
|
| 159 |
+
)
|
| 160 |
+
),
|
| 161 |
+
genai.protos.FunctionDeclaration(
|
| 162 |
+
name="create_driver",
|
| 163 |
+
description="Create a new delivery driver/delivery man in the database. Use this to onboard new drivers to the fleet.",
|
| 164 |
+
parameters=genai.protos.Schema(
|
| 165 |
+
type=genai.protos.Type.OBJECT,
|
| 166 |
+
properties={
|
| 167 |
+
"name": genai.protos.Schema(
|
| 168 |
+
type=genai.protos.Type.STRING,
|
| 169 |
+
description="Full name of the driver"
|
| 170 |
+
),
|
| 171 |
+
"phone": genai.protos.Schema(
|
| 172 |
+
type=genai.protos.Type.STRING,
|
| 173 |
+
description="Driver phone number"
|
| 174 |
+
),
|
| 175 |
+
"email": genai.protos.Schema(
|
| 176 |
+
type=genai.protos.Type.STRING,
|
| 177 |
+
description="Driver email address (optional)"
|
| 178 |
+
),
|
| 179 |
+
"vehicle_type": genai.protos.Schema(
|
| 180 |
+
type=genai.protos.Type.STRING,
|
| 181 |
+
description="Type of vehicle: van, truck, car, motorcycle (default: van)"
|
| 182 |
+
),
|
| 183 |
+
"vehicle_plate": genai.protos.Schema(
|
| 184 |
+
type=genai.protos.Type.STRING,
|
| 185 |
+
description="Vehicle license plate number"
|
| 186 |
+
),
|
| 187 |
+
"capacity_kg": genai.protos.Schema(
|
| 188 |
+
type=genai.protos.Type.NUMBER,
|
| 189 |
+
description="Vehicle cargo capacity in kilograms (default: 1000.0)"
|
| 190 |
+
),
|
| 191 |
+
"capacity_m3": genai.protos.Schema(
|
| 192 |
+
type=genai.protos.Type.NUMBER,
|
| 193 |
+
description="Vehicle cargo volume in cubic meters (default: 12.0)"
|
| 194 |
+
),
|
| 195 |
+
"skills": genai.protos.Schema(
|
| 196 |
+
type=genai.protos.Type.ARRAY,
|
| 197 |
+
description="List of driver skills/certifications: refrigerated, medical_certified, fragile_handler, overnight, express_delivery",
|
| 198 |
+
items=genai.protos.Schema(type=genai.protos.Type.STRING)
|
| 199 |
+
),
|
| 200 |
+
"status": genai.protos.Schema(
|
| 201 |
+
type=genai.protos.Type.STRING,
|
| 202 |
+
description="Driver status: active, busy, offline, unavailable (default: active)"
|
| 203 |
+
)
|
| 204 |
+
},
|
| 205 |
+
required=["name"]
|
| 206 |
+
)
|
| 207 |
+
)
|
| 208 |
+
]
|
| 209 |
+
)
|
| 210 |
+
]
|
| 211 |
+
|
| 212 |
+
def _ensure_initialized(self):
|
| 213 |
+
"""Lazy initialization - only create model when first needed"""
|
| 214 |
+
if self._initialized or not self.api_available:
|
| 215 |
+
return
|
| 216 |
+
|
| 217 |
+
try:
|
| 218 |
+
genai.configure(api_key=self.api_key)
|
| 219 |
+
self.model = genai.GenerativeModel(
|
| 220 |
+
model_name=self.model_name,
|
| 221 |
+
tools=self._get_gemini_tools(),
|
| 222 |
+
system_instruction=self._get_system_prompt()
|
| 223 |
+
)
|
| 224 |
+
self._initialized = True
|
| 225 |
+
logger.info(f"GeminiProvider: Model initialized ({self.model_name})")
|
| 226 |
+
except Exception as e:
|
| 227 |
+
logger.error(f"GeminiProvider: Failed to initialize: {e}")
|
| 228 |
+
self.api_available = False
|
| 229 |
+
self.model = None
|
| 230 |
+
|
| 231 |
+
def is_available(self) -> bool:
|
| 232 |
+
return self.api_available
|
| 233 |
+
|
| 234 |
+
def get_status(self) -> str:
|
| 235 |
+
if self.api_available:
|
| 236 |
+
return f"β
Connected - Model: {self.model_name}"
|
| 237 |
+
return "β οΈ Not configured (add GOOGLE_API_KEY)"
|
| 238 |
+
|
| 239 |
+
def get_provider_name(self) -> str:
|
| 240 |
+
return "Gemini (Google)"
|
| 241 |
+
|
| 242 |
+
def get_model_name(self) -> str:
|
| 243 |
+
return self.model_name if self.api_available else "gemini-2.0-flash"
|
| 244 |
+
|
| 245 |
+
def process_message(
|
| 246 |
+
self,
|
| 247 |
+
user_message: str,
|
| 248 |
+
conversation
|
| 249 |
+
) -> Tuple[str, List[Dict]]:
|
| 250 |
+
"""Process user message with Gemini"""
|
| 251 |
+
if not self.api_available:
|
| 252 |
+
return self._handle_no_api(), []
|
| 253 |
+
|
| 254 |
+
# Lazy initialization on first use
|
| 255 |
+
self._ensure_initialized()
|
| 256 |
+
|
| 257 |
+
if not self._initialized:
|
| 258 |
+
return "β οΈ Failed to initialize Gemini model. Please check your API key and try again.", []
|
| 259 |
+
|
| 260 |
+
try:
|
| 261 |
+
# Build conversation history for Gemini
|
| 262 |
+
chat = self.model.start_chat(history=self._convert_history(conversation))
|
| 263 |
+
|
| 264 |
+
# Send message and get response
|
| 265 |
+
response = chat.send_message(
|
| 266 |
+
user_message,
|
| 267 |
+
safety_settings={
|
| 268 |
+
HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
|
| 269 |
+
HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
|
| 270 |
+
HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
|
| 271 |
+
HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
|
| 272 |
+
}
|
| 273 |
+
)
|
| 274 |
+
|
| 275 |
+
# Add user message to conversation
|
| 276 |
+
conversation.add_message("user", user_message)
|
| 277 |
+
|
| 278 |
+
# Process response and handle function calls
|
| 279 |
+
return self._process_response(response, conversation, chat)
|
| 280 |
+
|
| 281 |
+
except Exception as e:
|
| 282 |
+
error_msg = f"β οΈ Gemini API error: {str(e)}"
|
| 283 |
+
logger.error(f"Gemini provider error: {e}")
|
| 284 |
+
return error_msg, []
|
| 285 |
+
|
| 286 |
+
def _convert_history(self, conversation) -> list:
|
| 287 |
+
"""Convert conversation history to Gemini format"""
|
| 288 |
+
history = []
|
| 289 |
+
# Get all messages from conversation (history is built before adding current message)
|
| 290 |
+
for msg in conversation.get_history():
|
| 291 |
+
role = "user" if msg["role"] == "user" else "model"
|
| 292 |
+
history.append({
|
| 293 |
+
"role": role,
|
| 294 |
+
"parts": [{"text": str(msg["content"])}]
|
| 295 |
+
})
|
| 296 |
+
return history
|
| 297 |
+
|
| 298 |
+
def _process_response(
|
| 299 |
+
self,
|
| 300 |
+
response,
|
| 301 |
+
conversation,
|
| 302 |
+
chat
|
| 303 |
+
) -> Tuple[str, List[Dict]]:
|
| 304 |
+
"""Process Gemini's response and handle function calls"""
|
| 305 |
+
tool_calls_made = []
|
| 306 |
+
|
| 307 |
+
# Check if Gemini wants to call functions
|
| 308 |
+
try:
|
| 309 |
+
# Check ALL parts for function calls (not just first)
|
| 310 |
+
has_function_call = False
|
| 311 |
+
parts = response.candidates[0].content.parts
|
| 312 |
+
logger.info(f"Processing response with {len(parts)} part(s)")
|
| 313 |
+
|
| 314 |
+
for part in parts:
|
| 315 |
+
if hasattr(part, 'function_call'):
|
| 316 |
+
fc = part.function_call
|
| 317 |
+
# More robust check
|
| 318 |
+
if fc is not None:
|
| 319 |
+
try:
|
| 320 |
+
if hasattr(fc, 'name') and fc.name:
|
| 321 |
+
has_function_call = True
|
| 322 |
+
logger.info(f"Detected function call: {fc.name}")
|
| 323 |
+
break
|
| 324 |
+
except Exception as e:
|
| 325 |
+
logger.warning(f"Error checking function call: {e}")
|
| 326 |
+
|
| 327 |
+
if has_function_call:
|
| 328 |
+
# Handle function calls (potentially multiple in sequence)
|
| 329 |
+
current_response = response
|
| 330 |
+
max_iterations = 10 # Allow more iterations for complex tasks
|
| 331 |
+
|
| 332 |
+
for iteration in range(max_iterations):
|
| 333 |
+
# Check if current response has a function call
|
| 334 |
+
try:
|
| 335 |
+
parts = current_response.candidates[0].content.parts
|
| 336 |
+
logger.info(f"Iteration {iteration + 1}: Response has {len(parts)} part(s)")
|
| 337 |
+
except (IndexError, AttributeError) as e:
|
| 338 |
+
logger.error(f"Cannot access response parts: {e}")
|
| 339 |
+
break
|
| 340 |
+
|
| 341 |
+
# Check ALL parts for function calls (some responses have text + function_call)
|
| 342 |
+
has_fc = False
|
| 343 |
+
fc_part = None
|
| 344 |
+
|
| 345 |
+
for idx, part in enumerate(parts):
|
| 346 |
+
if hasattr(part, 'function_call'):
|
| 347 |
+
fc = part.function_call
|
| 348 |
+
if fc and hasattr(fc, 'name') and fc.name:
|
| 349 |
+
has_fc = True
|
| 350 |
+
fc_part = part
|
| 351 |
+
logger.info(f"Iteration {iteration + 1}: Found function_call in part {idx}: {fc.name}")
|
| 352 |
+
break
|
| 353 |
+
|
| 354 |
+
# Also check if there's text (indicates Gemini wants to respond instead of continuing)
|
| 355 |
+
if hasattr(part, 'text') and part.text:
|
| 356 |
+
logger.warning(f"Iteration {iteration + 1}: Part {idx} has text: {part.text[:100]}")
|
| 357 |
+
|
| 358 |
+
if not has_fc:
|
| 359 |
+
# No more function calls, break and extract text
|
| 360 |
+
logger.info(f"No more function calls after iteration {iteration + 1}")
|
| 361 |
+
break
|
| 362 |
+
|
| 363 |
+
# Use the part with function_call
|
| 364 |
+
first_part = fc_part
|
| 365 |
+
|
| 366 |
+
# Extract function call details
|
| 367 |
+
function_call = first_part.function_call
|
| 368 |
+
function_name = function_call.name
|
| 369 |
+
function_args = dict(function_call.args) if function_call.args else {}
|
| 370 |
+
|
| 371 |
+
logger.info(f"Gemini executing function: {function_name} (iteration {iteration + 1})")
|
| 372 |
+
|
| 373 |
+
# Execute the tool
|
| 374 |
+
tool_result = execute_tool(function_name, function_args)
|
| 375 |
+
|
| 376 |
+
# Track for transparency
|
| 377 |
+
tool_calls_made.append({
|
| 378 |
+
"tool": function_name,
|
| 379 |
+
"input": function_args,
|
| 380 |
+
"result": tool_result
|
| 381 |
+
})
|
| 382 |
+
|
| 383 |
+
conversation.add_tool_result(function_name, function_args, tool_result)
|
| 384 |
+
|
| 385 |
+
# Send function result back to Gemini
|
| 386 |
+
try:
|
| 387 |
+
current_response = chat.send_message(
|
| 388 |
+
genai.protos.Content(
|
| 389 |
+
parts=[genai.protos.Part(
|
| 390 |
+
function_response=genai.protos.FunctionResponse(
|
| 391 |
+
name=function_name,
|
| 392 |
+
response={"result": tool_result}
|
| 393 |
+
)
|
| 394 |
+
)]
|
| 395 |
+
)
|
| 396 |
+
)
|
| 397 |
+
except Exception as e:
|
| 398 |
+
logger.error(f"Error sending function response: {e}")
|
| 399 |
+
break
|
| 400 |
+
|
| 401 |
+
# Now extract text from the final response
|
| 402 |
+
# NEVER use .text property directly - always check parts
|
| 403 |
+
final_text = ""
|
| 404 |
+
try:
|
| 405 |
+
parts = current_response.candidates[0].content.parts
|
| 406 |
+
logger.info(f"Extracting text from {len(parts)} parts")
|
| 407 |
+
|
| 408 |
+
for idx, part in enumerate(parts):
|
| 409 |
+
# Check if this part has a function call
|
| 410 |
+
if hasattr(part, 'function_call') and part.function_call:
|
| 411 |
+
fc = part.function_call
|
| 412 |
+
if hasattr(fc, 'name') and fc.name:
|
| 413 |
+
logger.warning(f"Part {idx} still has function call: {fc.name}. Skipping.")
|
| 414 |
+
continue
|
| 415 |
+
|
| 416 |
+
# Extract text from this part
|
| 417 |
+
if hasattr(part, 'text') and part.text:
|
| 418 |
+
logger.info(f"Part {idx} has text: {part.text[:50]}...")
|
| 419 |
+
final_text += part.text
|
| 420 |
+
|
| 421 |
+
except (AttributeError, IndexError) as e:
|
| 422 |
+
logger.error(f"Error extracting text from parts: {e}")
|
| 423 |
+
|
| 424 |
+
# Generate fallback message if still no text
|
| 425 |
+
if not final_text:
|
| 426 |
+
logger.warning("No text extracted from response, generating fallback")
|
| 427 |
+
if tool_calls_made:
|
| 428 |
+
# Create a summary of what was done
|
| 429 |
+
tool_names = [t["tool"] for t in tool_calls_made]
|
| 430 |
+
if "create_order" in tool_names:
|
| 431 |
+
# Check if order was created successfully
|
| 432 |
+
create_result = next((t["result"] for t in tool_calls_made if t["tool"] == "create_order"), {})
|
| 433 |
+
if create_result.get("success"):
|
| 434 |
+
order_id = create_result.get("order_id", "")
|
| 435 |
+
final_text = f"β
Order {order_id} created successfully!"
|
| 436 |
+
else:
|
| 437 |
+
final_text = "β οΈ There was an issue creating the order."
|
| 438 |
+
else:
|
| 439 |
+
final_text = f"β
Executed {len(tool_calls_made)} tool(s) successfully!"
|
| 440 |
+
else:
|
| 441 |
+
final_text = "β
Task completed!"
|
| 442 |
+
|
| 443 |
+
logger.info(f"Returning response: {final_text[:100]}")
|
| 444 |
+
conversation.add_message("assistant", final_text)
|
| 445 |
+
return final_text, tool_calls_made
|
| 446 |
+
|
| 447 |
+
else:
|
| 448 |
+
# No function call detected, extract text from parts
|
| 449 |
+
text_response = ""
|
| 450 |
+
try:
|
| 451 |
+
parts = response.candidates[0].content.parts
|
| 452 |
+
logger.info(f"Extracting text from {len(parts)} parts (no function call)")
|
| 453 |
+
|
| 454 |
+
for idx, part in enumerate(parts):
|
| 455 |
+
# Double-check no function call in this part
|
| 456 |
+
if hasattr(part, 'function_call') and part.function_call:
|
| 457 |
+
fc = part.function_call
|
| 458 |
+
if hasattr(fc, 'name') and fc.name:
|
| 459 |
+
logger.error(f"Part {idx} has function call {fc.name} but was not detected earlier!")
|
| 460 |
+
# We missed a function call - handle it now
|
| 461 |
+
logger.info("Re-processing response with function call handling")
|
| 462 |
+
return self._process_response(response, conversation, chat)
|
| 463 |
+
|
| 464 |
+
# Extract text
|
| 465 |
+
if hasattr(part, 'text') and part.text:
|
| 466 |
+
logger.info(f"Part {idx} has text: {part.text[:50]}...")
|
| 467 |
+
text_response += part.text
|
| 468 |
+
|
| 469 |
+
except (ValueError, AttributeError, IndexError) as e:
|
| 470 |
+
logger.error(f"Error extracting text from response: {e}")
|
| 471 |
+
|
| 472 |
+
# Fallback if no text extracted
|
| 473 |
+
if not text_response:
|
| 474 |
+
logger.warning("No text in response, using fallback")
|
| 475 |
+
text_response = "I'm ready to help! What would you like me to do?"
|
| 476 |
+
|
| 477 |
+
conversation.add_message("assistant", text_response)
|
| 478 |
+
return text_response, tool_calls_made
|
| 479 |
+
|
| 480 |
+
except Exception as e:
|
| 481 |
+
logger.error(f"Error processing Gemini response: {e}")
|
| 482 |
+
error_msg = f"β οΈ Error processing response: {str(e)}"
|
| 483 |
+
conversation.add_message("assistant", error_msg)
|
| 484 |
+
return error_msg, tool_calls_made
|
| 485 |
+
|
| 486 |
+
def _handle_no_api(self) -> str:
|
| 487 |
+
"""Return error message when API is not available"""
|
| 488 |
+
return """β οΈ **Gemini API requires Google API key**
|
| 489 |
+
|
| 490 |
+
To use Gemini:
|
| 491 |
+
|
| 492 |
+
1. Get an API key from: https://aistudio.google.com/app/apikey
|
| 493 |
+
- Free tier: 15 requests/min, 1500/day
|
| 494 |
+
- Or use hackathon credits
|
| 495 |
+
|
| 496 |
+
2. Add to your `.env` file:
|
| 497 |
+
```
|
| 498 |
+
GOOGLE_API_KEY=your-gemini-key-here
|
| 499 |
+
```
|
| 500 |
+
|
| 501 |
+
3. Restart the application
|
| 502 |
+
|
| 503 |
+
**Alternative:** Switch to Claude by setting `AI_PROVIDER=anthropic` in .env
|
| 504 |
+
"""
|
| 505 |
+
|
| 506 |
+
def get_welcome_message(self) -> str:
|
| 507 |
+
if not self.api_available:
|
| 508 |
+
return self._handle_no_api()
|
| 509 |
+
|
| 510 |
+
# Initialize on first use (welcome message)
|
| 511 |
+
self._ensure_initialized()
|
| 512 |
+
|
| 513 |
+
return """π Hello! I'm your AI dispatch assistant powered by **Google Gemini 2.0 Flash**.
|
| 514 |
+
|
| 515 |
+
I can help you manage your delivery fleet!
|
| 516 |
+
|
| 517 |
+
---
|
| 518 |
+
|
| 519 |
+
π **What I Can Do:**
|
| 520 |
+
|
| 521 |
+
**1. Create Delivery Orders:**
|
| 522 |
+
β’ Customer Name
|
| 523 |
+
β’ Delivery Address
|
| 524 |
+
β’ Contact (Phone OR Email)
|
| 525 |
+
β’ Optional: Deadline, Priority, Special Instructions
|
| 526 |
+
|
| 527 |
+
**2. Add New Drivers:**
|
| 528 |
+
β’ Driver Name (required)
|
| 529 |
+
β’ Optional: Phone, Email, Vehicle Type, License Plate, Skills
|
| 530 |
+
|
| 531 |
+
---
|
| 532 |
+
|
| 533 |
+
**Examples - Just Type Naturally:**
|
| 534 |
+
|
| 535 |
+
📦 **Orders:**
|
| 536 |
+
π¬ "Create order for John Doe, 123 Main St San Francisco CA, phone 555-1234, deliver by 5 PM"
|
| 537 |
+
π¬ "New urgent delivery to Sarah at 456 Oak Ave NYC, email sarah@email.com"
|
| 538 |
+
|
| 539 |
+
π **Drivers:**
|
| 540 |
+
π¬ "Add new driver Tom Wilson, phone 555-0101, drives a van, plate ABC-123"
|
| 541 |
+
π¬ "Create driver Sarah Martinez with refrigerated truck, phone 555-0202"
|
| 542 |
+
π¬ "New driver: Mike Chen, email mike@fleet.com, motorcycle delivery"
|
| 543 |
+
|
| 544 |
+
---
|
| 545 |
+
|
| 546 |
+
π **I'll automatically:**
|
| 547 |
+
β’ Geocode addresses for orders
|
| 548 |
+
β’ Generate unique IDs
|
| 549 |
+
β’ Save everything to the database
|
| 550 |
+
|
| 551 |
+
What would you like to do?"""
|
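For a quick smoke test of this provider outside the Gradio UI, something like the sketch below should work. It assumes the `GeminiProvider` class from `chat/providers/gemini_provider.py`, a `ConversationManager` exported by `chat/conversation.py` (both part of this commit, the import path for the latter is an assumption), and a configured `GOOGLE_API_KEY`; it is an illustration, not part of the committed code.

```python
# Minimal smoke test (assumption: run from the repo root with .env configured).
from chat.conversation import ConversationManager
from chat.providers.gemini_provider import GeminiProvider

provider = GeminiProvider()
conversation = ConversationManager()

print(provider.get_welcome_message())

# process_message returns (assistant_text, tool_calls_made) per the code above.
reply, tool_calls = provider.process_message(
    "Add new driver Tom Wilson, phone 555-0101, drives a van, plate ABC-123",
    conversation,
)
print(reply)
print(tool_calls)
```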
chat/tools.py
ADDED
|
@@ -0,0 +1,350 @@
| 1 |
+
"""
|
| 2 |
+
Tool definitions and execution handlers for FleetMind chat
|
| 3 |
+
Simulates MCP tools using Claude's tool calling feature
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import sys
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
from datetime import datetime, timedelta
|
| 9 |
+
import logging
|
| 10 |
+
|
| 11 |
+
# Add parent directory to path
|
| 12 |
+
sys.path.insert(0, str(Path(__file__).parent.parent))
|
| 13 |
+
|
| 14 |
+
from database.connection import execute_write
|
| 15 |
+
from chat.geocoding import GeocodingService
|
| 16 |
+
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
# Initialize geocoding service
|
| 20 |
+
geocoding_service = GeocodingService()
|
| 21 |
+
|
| 22 |
+
# Tool schemas for Claude
|
| 23 |
+
TOOLS_SCHEMA = [
|
| 24 |
+
{
|
| 25 |
+
"name": "geocode_address",
|
| 26 |
+
"description": "Convert a delivery address to GPS coordinates and validate the address format. Use this before creating an order to ensure the address is valid.",
|
| 27 |
+
"input_schema": {
|
| 28 |
+
"type": "object",
|
| 29 |
+
"properties": {
|
| 30 |
+
"address": {
|
| 31 |
+
"type": "string",
|
| 32 |
+
"description": "The full delivery address to geocode (e.g., '123 Main St, San Francisco, CA')"
|
| 33 |
+
}
|
| 34 |
+
},
|
| 35 |
+
"required": ["address"]
|
| 36 |
+
}
|
| 37 |
+
},
|
| 38 |
+
{
|
| 39 |
+
"name": "create_order",
|
| 40 |
+
"description": "Create a new delivery order in the database. Only call this after geocoding the address successfully.",
|
| 41 |
+
"input_schema": {
|
| 42 |
+
"type": "object",
|
| 43 |
+
"properties": {
|
| 44 |
+
"customer_name": {
|
| 45 |
+
"type": "string",
|
| 46 |
+
"description": "Full name of the customer"
|
| 47 |
+
},
|
| 48 |
+
"customer_phone": {
|
| 49 |
+
"type": "string",
|
| 50 |
+
"description": "Customer phone number (optional)"
|
| 51 |
+
},
|
| 52 |
+
"customer_email": {
|
| 53 |
+
"type": "string",
|
| 54 |
+
"description": "Customer email address (optional)"
|
| 55 |
+
},
|
| 56 |
+
"delivery_address": {
|
| 57 |
+
"type": "string",
|
| 58 |
+
"description": "Full delivery address"
|
| 59 |
+
},
|
| 60 |
+
"delivery_lat": {
|
| 61 |
+
"type": "number",
|
| 62 |
+
"description": "Latitude from geocoding"
|
| 63 |
+
},
|
| 64 |
+
"delivery_lng": {
|
| 65 |
+
"type": "number",
|
| 66 |
+
"description": "Longitude from geocoding"
|
| 67 |
+
},
|
| 68 |
+
"time_window_end": {
|
| 69 |
+
"type": "string",
|
| 70 |
+
"description": "Delivery deadline in ISO format (e.g., '2025-11-13T17:00:00'). If not specified by user, default to 6 hours from now."
|
| 71 |
+
},
|
| 72 |
+
"priority": {
|
| 73 |
+
"type": "string",
|
| 74 |
+
"enum": ["standard", "express", "urgent"],
|
| 75 |
+
"description": "Delivery priority. Default to 'standard' unless user specifies urgent/express."
|
| 76 |
+
},
|
| 77 |
+
"special_instructions": {
|
| 78 |
+
"type": "string",
|
| 79 |
+
"description": "Any special delivery instructions (optional)"
|
| 80 |
+
},
|
| 81 |
+
"weight_kg": {
|
| 82 |
+
"type": "number",
|
| 83 |
+
"description": "Package weight in kilograms (optional, default to 5.0)"
|
| 84 |
+
}
|
| 85 |
+
},
|
| 86 |
+
"required": ["customer_name", "delivery_address", "delivery_lat", "delivery_lng"]
|
| 87 |
+
}
|
| 88 |
+
}
|
| 89 |
+
]
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def execute_tool(tool_name: str, tool_input: dict) -> dict:
|
| 93 |
+
"""
|
| 94 |
+
Route tool execution to appropriate handler
|
| 95 |
+
|
| 96 |
+
Args:
|
| 97 |
+
tool_name: Name of the tool to execute
|
| 98 |
+
tool_input: Tool input parameters
|
| 99 |
+
|
| 100 |
+
Returns:
|
| 101 |
+
Dict with tool execution results
|
| 102 |
+
"""
|
| 103 |
+
try:
|
| 104 |
+
if tool_name == "geocode_address":
|
| 105 |
+
return handle_geocode_address(tool_input)
|
| 106 |
+
elif tool_name == "create_order":
|
| 107 |
+
return handle_create_order(tool_input)
|
| 108 |
+
elif tool_name == "create_driver":
|
| 109 |
+
return handle_create_driver(tool_input)
|
| 110 |
+
else:
|
| 111 |
+
return {
|
| 112 |
+
"success": False,
|
| 113 |
+
"error": f"Unknown tool: {tool_name}"
|
| 114 |
+
}
|
| 115 |
+
except Exception as e:
|
| 116 |
+
logger.error(f"Tool execution error ({tool_name}): {e}")
|
| 117 |
+
return {
|
| 118 |
+
"success": False,
|
| 119 |
+
"error": str(e)
|
| 120 |
+
}
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def handle_geocode_address(tool_input: dict) -> dict:
|
| 124 |
+
"""
|
| 125 |
+
Execute geocoding tool
|
| 126 |
+
|
| 127 |
+
Args:
|
| 128 |
+
tool_input: Dict with 'address' key
|
| 129 |
+
|
| 130 |
+
Returns:
|
| 131 |
+
Geocoding result
|
| 132 |
+
"""
|
| 133 |
+
address = tool_input.get("address", "")
|
| 134 |
+
|
| 135 |
+
if not address:
|
| 136 |
+
return {
|
| 137 |
+
"success": False,
|
| 138 |
+
"error": "Address is required"
|
| 139 |
+
}
|
| 140 |
+
|
| 141 |
+
logger.info(f"Geocoding address: {address}")
|
| 142 |
+
|
| 143 |
+
result = geocoding_service.geocode(address)
|
| 144 |
+
|
| 145 |
+
return {
|
| 146 |
+
"success": True,
|
| 147 |
+
"latitude": result["lat"],
|
| 148 |
+
"longitude": result["lng"],
|
| 149 |
+
"formatted_address": result["formatted_address"],
|
| 150 |
+
"confidence": result["confidence"],
|
| 151 |
+
"message": f"Address geocoded successfully ({result['confidence']})"
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def handle_create_order(tool_input: dict) -> dict:
|
| 156 |
+
"""
|
| 157 |
+
Execute order creation tool
|
| 158 |
+
|
| 159 |
+
Args:
|
| 160 |
+
tool_input: Dict with order fields
|
| 161 |
+
|
| 162 |
+
Returns:
|
| 163 |
+
Order creation result
|
| 164 |
+
"""
|
| 165 |
+
# Extract fields with defaults
|
| 166 |
+
customer_name = tool_input.get("customer_name")
|
| 167 |
+
customer_phone = tool_input.get("customer_phone")
|
| 168 |
+
customer_email = tool_input.get("customer_email")
|
| 169 |
+
delivery_address = tool_input.get("delivery_address")
|
| 170 |
+
delivery_lat = tool_input.get("delivery_lat")
|
| 171 |
+
delivery_lng = tool_input.get("delivery_lng")
|
| 172 |
+
priority = tool_input.get("priority", "standard")
|
| 173 |
+
special_instructions = tool_input.get("special_instructions")
|
| 174 |
+
weight_kg = tool_input.get("weight_kg", 5.0)
|
| 175 |
+
|
| 176 |
+
# Validate required fields
|
| 177 |
+
if not all([customer_name, delivery_address, delivery_lat, delivery_lng]):
|
| 178 |
+
return {
|
| 179 |
+
"success": False,
|
| 180 |
+
"error": "Missing required fields: customer_name, delivery_address, delivery_lat, delivery_lng"
|
| 181 |
+
}
|
| 182 |
+
|
| 183 |
+
# Generate order ID
|
| 184 |
+
now = datetime.now()
|
| 185 |
+
order_id = f"ORD-{now.strftime('%Y%m%d%H%M%S')}"
|
| 186 |
+
|
| 187 |
+
# Handle time window
|
| 188 |
+
time_window_end_str = tool_input.get("time_window_end")
|
| 189 |
+
if time_window_end_str:
|
| 190 |
+
try:
|
| 191 |
+
time_window_end = datetime.fromisoformat(time_window_end_str.replace('Z', '+00:00'))
|
| 192 |
+
except (ValueError, TypeError):
|
| 193 |
+
time_window_end = now + timedelta(hours=6)
|
| 194 |
+
else:
|
| 195 |
+
time_window_end = now + timedelta(hours=6)
|
| 196 |
+
|
| 197 |
+
time_window_start = now + timedelta(hours=2)
|
| 198 |
+
|
| 199 |
+
# Insert into database
|
| 200 |
+
query = """
|
| 201 |
+
INSERT INTO orders (
|
| 202 |
+
order_id, customer_name, customer_phone, customer_email,
|
| 203 |
+
delivery_address, delivery_lat, delivery_lng,
|
| 204 |
+
time_window_start, time_window_end,
|
| 205 |
+
priority, weight_kg, status, special_instructions
|
| 206 |
+
) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
|
| 207 |
+
"""
|
| 208 |
+
|
| 209 |
+
params = (
|
| 210 |
+
order_id,
|
| 211 |
+
customer_name,
|
| 212 |
+
customer_phone,
|
| 213 |
+
customer_email,
|
| 214 |
+
delivery_address,
|
| 215 |
+
delivery_lat,
|
| 216 |
+
delivery_lng,
|
| 217 |
+
time_window_start,
|
| 218 |
+
time_window_end,
|
| 219 |
+
priority,
|
| 220 |
+
weight_kg,
|
| 221 |
+
"pending",
|
| 222 |
+
special_instructions
|
| 223 |
+
)
|
| 224 |
+
|
| 225 |
+
try:
|
| 226 |
+
execute_write(query, params)
|
| 227 |
+
logger.info(f"Order created: {order_id}")
|
| 228 |
+
|
| 229 |
+
return {
|
| 230 |
+
"success": True,
|
| 231 |
+
"order_id": order_id,
|
| 232 |
+
"status": "pending",
|
| 233 |
+
"customer": customer_name,
|
| 234 |
+
"address": delivery_address,
|
| 235 |
+
"deadline": time_window_end.strftime("%Y-%m-%d %H:%M"),
|
| 236 |
+
"priority": priority,
|
| 237 |
+
"message": f"Order {order_id} created successfully!"
|
| 238 |
+
}
|
| 239 |
+
except Exception as e:
|
| 240 |
+
logger.error(f"Database error creating order: {e}")
|
| 241 |
+
return {
|
| 242 |
+
"success": False,
|
| 243 |
+
"error": f"Failed to create order: {str(e)}"
|
| 244 |
+
}
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
def handle_create_driver(tool_input: dict) -> dict:
|
| 248 |
+
"""
|
| 249 |
+
Execute driver creation tool
|
| 250 |
+
|
| 251 |
+
Args:
|
| 252 |
+
tool_input: Dict with driver fields
|
| 253 |
+
|
| 254 |
+
Returns:
|
| 255 |
+
Driver creation result
|
| 256 |
+
"""
|
| 257 |
+
# Extract fields with defaults
|
| 258 |
+
name = tool_input.get("name")
|
| 259 |
+
phone = tool_input.get("phone")
|
| 260 |
+
email = tool_input.get("email")
|
| 261 |
+
vehicle_type = tool_input.get("vehicle_type", "van")
|
| 262 |
+
vehicle_plate = tool_input.get("vehicle_plate")
|
| 263 |
+
capacity_kg = tool_input.get("capacity_kg", 1000.0)
|
| 264 |
+
capacity_m3 = tool_input.get("capacity_m3", 12.0)
|
| 265 |
+
|
| 266 |
+
# Convert skills to regular list (handles protobuf RepeatedComposite)
|
| 267 |
+
skills_raw = tool_input.get("skills", [])
|
| 268 |
+
skills = list(skills_raw) if skills_raw else []
|
| 269 |
+
|
| 270 |
+
status = tool_input.get("status", "active")
|
| 271 |
+
|
| 272 |
+
# Validate required fields
|
| 273 |
+
if not name:
|
| 274 |
+
return {
|
| 275 |
+
"success": False,
|
| 276 |
+
"error": "Missing required field: name"
|
| 277 |
+
}
|
| 278 |
+
|
| 279 |
+
# Generate driver ID
|
| 280 |
+
now = datetime.now()
|
| 281 |
+
driver_id = f"DRV-{now.strftime('%Y%m%d%H%M%S')}"
|
| 282 |
+
|
| 283 |
+
# Default location (San Francisco)
|
| 284 |
+
current_lat = tool_input.get("current_lat", 37.7749)
|
| 285 |
+
current_lng = tool_input.get("current_lng", -122.4194)
|
| 286 |
+
|
| 287 |
+
# Insert into database
|
| 288 |
+
query = """
|
| 289 |
+
INSERT INTO drivers (
|
| 290 |
+
driver_id, name, phone, email,
|
| 291 |
+
current_lat, current_lng, last_location_update,
|
| 292 |
+
status, vehicle_type, vehicle_plate,
|
| 293 |
+
capacity_kg, capacity_m3, skills
|
| 294 |
+
) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
|
| 295 |
+
"""
|
| 296 |
+
|
| 297 |
+
# Convert skills list to JSON
|
| 298 |
+
import json
|
| 299 |
+
skills_json = json.dumps(skills) if skills else json.dumps([])
|
| 300 |
+
|
| 301 |
+
params = (
|
| 302 |
+
driver_id,
|
| 303 |
+
name,
|
| 304 |
+
phone,
|
| 305 |
+
email,
|
| 306 |
+
current_lat,
|
| 307 |
+
current_lng,
|
| 308 |
+
now,
|
| 309 |
+
status,
|
| 310 |
+
vehicle_type,
|
| 311 |
+
vehicle_plate,
|
| 312 |
+
capacity_kg,
|
| 313 |
+
capacity_m3,
|
| 314 |
+
skills_json
|
| 315 |
+
)
|
| 316 |
+
|
| 317 |
+
try:
|
| 318 |
+
execute_write(query, params)
|
| 319 |
+
logger.info(f"Driver created: {driver_id}")
|
| 320 |
+
|
| 321 |
+
return {
|
| 322 |
+
"success": True,
|
| 323 |
+
"driver_id": driver_id,
|
| 324 |
+
"name": name,
|
| 325 |
+
"status": status,
|
| 326 |
+
"vehicle_type": vehicle_type,
|
| 327 |
+
"vehicle_plate": vehicle_plate,
|
| 328 |
+
"capacity_kg": capacity_kg,
|
| 329 |
+
"skills": skills,
|
| 330 |
+
"message": f"Driver {driver_id} ({name}) created successfully!"
|
| 331 |
+
}
|
| 332 |
+
except Exception as e:
|
| 333 |
+
logger.error(f"Database error creating driver: {e}")
|
| 334 |
+
return {
|
| 335 |
+
"success": False,
|
| 336 |
+
"error": f"Failed to create driver: {str(e)}"
|
| 337 |
+
}
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
def get_tools_list() -> list:
|
| 341 |
+
"""Get list of available tools"""
|
| 342 |
+
return [tool["name"] for tool in TOOLS_SCHEMA]
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
def get_tool_description(tool_name: str) -> str:
|
| 346 |
+
"""Get description for a specific tool"""
|
| 347 |
+
for tool in TOOLS_SCHEMA:
|
| 348 |
+
if tool["name"] == tool_name:
|
| 349 |
+
return tool["description"]
|
| 350 |
+
return ""
|
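Because `execute_tool()` is a plain function, this tool layer can be exercised without any LLM in the loop. Note that the router also accepts `create_driver` even though `TOOLS_SCHEMA` as committed only declares `geocode_address` and `create_order`. A minimal sketch (hypothetical input values; requires a reachable PostgreSQL instance configured via the `DB_*` variables):

```python
# Sketch: drive the tool handlers directly, bypassing Gemini/Claude.
from chat.tools import execute_tool, get_tools_list

print(get_tools_list())  # ['geocode_address', 'create_order']

geo = execute_tool("geocode_address", {"address": "123 Main St, San Francisco"})
assert geo["success"]

order = execute_tool("create_order", {
    "customer_name": "John Doe",
    "customer_phone": "555-1234",
    "delivery_address": "123 Main St, San Francisco",
    "delivery_lat": geo["latitude"],
    "delivery_lng": geo["longitude"],
    "priority": "standard",
})
print(order.get("order_id"), order.get("message"))
```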
database/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
"""
Database package for FleetMind MCP
Handles all database connections and operations
"""

from .connection import get_db_connection, init_database

__all__ = ['get_db_connection', 'init_database']
database/connection.py
ADDED
|
@@ -0,0 +1,219 @@
| 1 |
+
"""
|
| 2 |
+
Database connection module for FleetMind
|
| 3 |
+
Handles PostgreSQL database connections and initialization
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import psycopg2
|
| 7 |
+
import psycopg2.extras
|
| 8 |
+
import os
|
| 9 |
+
from typing import Optional, List, Dict, Any
|
| 10 |
+
from pathlib import Path
|
| 11 |
+
import logging
|
| 12 |
+
from dotenv import load_dotenv
|
| 13 |
+
|
| 14 |
+
# Load environment variables
|
| 15 |
+
load_dotenv()
|
| 16 |
+
|
| 17 |
+
# Set up logging
|
| 18 |
+
logging.basicConfig(level=logging.INFO)
|
| 19 |
+
logger = logging.getLogger(__name__)
|
| 20 |
+
|
| 21 |
+
# Database configuration from environment variables
|
| 22 |
+
DB_CONFIG = {
|
| 23 |
+
'host': os.getenv('DB_HOST', 'localhost'),
|
| 24 |
+
'port': os.getenv('DB_PORT', '5432'),
|
| 25 |
+
'database': os.getenv('DB_NAME', 'fleetmind'),
|
| 26 |
+
'user': os.getenv('DB_USER', 'postgres'),
|
| 27 |
+
'password': os.getenv('DB_PASSWORD', ''),
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def get_db_connection() -> psycopg2.extensions.connection:
|
| 32 |
+
"""
|
| 33 |
+
Create and return a PostgreSQL database connection.
|
| 34 |
+
|
| 35 |
+
Returns:
|
| 36 |
+
psycopg2.connection: Database connection object
|
| 37 |
+
|
| 38 |
+
Raises:
|
| 39 |
+
psycopg2.Error: If connection fails
|
| 40 |
+
"""
|
| 41 |
+
try:
|
| 42 |
+
conn = psycopg2.connect(
|
| 43 |
+
host=DB_CONFIG['host'],
|
| 44 |
+
port=DB_CONFIG['port'],
|
| 45 |
+
database=DB_CONFIG['database'],
|
| 46 |
+
user=DB_CONFIG['user'],
|
| 47 |
+
password=DB_CONFIG['password'],
|
| 48 |
+
cursor_factory=psycopg2.extras.RealDictCursor
|
| 49 |
+
)
|
| 50 |
+
|
| 51 |
+
logger.info(f"Database connection established: {DB_CONFIG['database']}@{DB_CONFIG['host']}")
|
| 52 |
+
return conn
|
| 53 |
+
|
| 54 |
+
except psycopg2.Error as e:
|
| 55 |
+
logger.error(f"Database connection error: {e}")
|
| 56 |
+
raise
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def init_database(schema_file: Optional[str] = None) -> None:
|
| 60 |
+
"""
|
| 61 |
+
Initialize the database with schema.
|
| 62 |
+
|
| 63 |
+
Args:
|
| 64 |
+
schema_file: Path to SQL schema file. If None, uses default schema.
|
| 65 |
+
|
| 66 |
+
Raises:
|
| 67 |
+
psycopg2.Error: If initialization fails
|
| 68 |
+
"""
|
| 69 |
+
try:
|
| 70 |
+
conn = get_db_connection()
|
| 71 |
+
cursor = conn.cursor()
|
| 72 |
+
|
| 73 |
+
if schema_file and os.path.exists(schema_file):
|
| 74 |
+
# Read and execute schema from file
|
| 75 |
+
with open(schema_file, 'r') as f:
|
| 76 |
+
schema_sql = f.read()
|
| 77 |
+
cursor.execute(schema_sql)
|
| 78 |
+
logger.info(f"Database initialized from schema file: {schema_file}")
|
| 79 |
+
else:
|
| 80 |
+
# Use default schema
|
| 81 |
+
from .schema import SCHEMA_SQL
|
| 82 |
+
cursor.execute(SCHEMA_SQL)
|
| 83 |
+
logger.info("Database initialized with default schema")
|
| 84 |
+
|
| 85 |
+
conn.commit()
|
| 86 |
+
cursor.close()
|
| 87 |
+
conn.close()
|
| 88 |
+
|
| 89 |
+
logger.info("Database initialization completed successfully")
|
| 90 |
+
|
| 91 |
+
except psycopg2.Error as e:
|
| 92 |
+
logger.error(f"Database initialization error: {e}")
|
| 93 |
+
if conn:
|
| 94 |
+
conn.rollback()
|
| 95 |
+
raise
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def close_connection(conn: psycopg2.extensions.connection) -> None:
|
| 99 |
+
"""
|
| 100 |
+
Safely close database connection.
|
| 101 |
+
|
| 102 |
+
Args:
|
| 103 |
+
conn: Database connection to close
|
| 104 |
+
"""
|
| 105 |
+
try:
|
| 106 |
+
if conn and not conn.closed:
|
| 107 |
+
conn.close()
|
| 108 |
+
logger.info("Database connection closed")
|
| 109 |
+
except psycopg2.Error as e:
|
| 110 |
+
logger.error(f"Error closing connection: {e}")
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def execute_query(query: str, params: tuple = ()) -> List[Dict[str, Any]]:
|
| 114 |
+
"""
|
| 115 |
+
Execute a SELECT query and return results.
|
| 116 |
+
|
| 117 |
+
Args:
|
| 118 |
+
query: SQL query string
|
| 119 |
+
params: Query parameters tuple
|
| 120 |
+
|
| 121 |
+
Returns:
|
| 122 |
+
list: Query results as list of dictionaries
|
| 123 |
+
|
| 124 |
+
Raises:
|
| 125 |
+
psycopg2.Error: If query fails
|
| 126 |
+
"""
|
| 127 |
+
conn = get_db_connection()
|
| 128 |
+
try:
|
| 129 |
+
cursor = conn.cursor()
|
| 130 |
+
cursor.execute(query, params)
|
| 131 |
+
results = cursor.fetchall()
|
| 132 |
+
cursor.close()
|
| 133 |
+
return results
|
| 134 |
+
except psycopg2.Error as e:
|
| 135 |
+
logger.error(f"Query execution error: {e}")
|
| 136 |
+
raise
|
| 137 |
+
finally:
|
| 138 |
+
close_connection(conn)
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def execute_write(query: str, params: tuple = ()) -> int:
|
| 142 |
+
"""
|
| 143 |
+
Execute an INSERT, UPDATE, or DELETE query.
|
| 144 |
+
|
| 145 |
+
Args:
|
| 146 |
+
query: SQL query string
|
| 147 |
+
params: Query parameters tuple
|
| 148 |
+
|
| 149 |
+
Returns:
|
| 150 |
+
int: Number of rows affected
|
| 151 |
+
|
| 152 |
+
Raises:
|
| 153 |
+
psycopg2.Error: If query fails
|
| 154 |
+
"""
|
| 155 |
+
conn = get_db_connection()
|
| 156 |
+
try:
|
| 157 |
+
cursor = conn.cursor()
|
| 158 |
+
cursor.execute(query, params)
|
| 159 |
+
rows_affected = cursor.rowcount
|
| 160 |
+
conn.commit()
|
| 161 |
+
cursor.close()
|
| 162 |
+
return rows_affected
|
| 163 |
+
except psycopg2.Error as e:
|
| 164 |
+
conn.rollback()
|
| 165 |
+
logger.error(f"Write query error: {e}")
|
| 166 |
+
raise
|
| 167 |
+
finally:
|
| 168 |
+
close_connection(conn)
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def execute_many(query: str, params_list: List[tuple]) -> int:
|
| 172 |
+
"""
|
| 173 |
+
Execute multiple INSERT/UPDATE queries in a batch.
|
| 174 |
+
|
| 175 |
+
Args:
|
| 176 |
+
query: SQL query string
|
| 177 |
+
params_list: List of parameter tuples
|
| 178 |
+
|
| 179 |
+
Returns:
|
| 180 |
+
int: Number of rows affected
|
| 181 |
+
|
| 182 |
+
Raises:
|
| 183 |
+
psycopg2.Error: If query fails
|
| 184 |
+
"""
|
| 185 |
+
conn = get_db_connection()
|
| 186 |
+
try:
|
| 187 |
+
cursor = conn.cursor()
|
| 188 |
+
cursor.executemany(query, params_list)
|
| 189 |
+
rows_affected = cursor.rowcount
|
| 190 |
+
conn.commit()
|
| 191 |
+
cursor.close()
|
| 192 |
+
return rows_affected
|
| 193 |
+
except psycopg2.Error as e:
|
| 194 |
+
conn.rollback()
|
| 195 |
+
logger.error(f"Batch write error: {e}")
|
| 196 |
+
raise
|
| 197 |
+
finally:
|
| 198 |
+
close_connection(conn)
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
def test_connection() -> bool:
|
| 202 |
+
"""
|
| 203 |
+
Test database connection.
|
| 204 |
+
|
| 205 |
+
Returns:
|
| 206 |
+
bool: True if connection successful, False otherwise
|
| 207 |
+
"""
|
| 208 |
+
try:
|
| 209 |
+
conn = get_db_connection()
|
| 210 |
+
cursor = conn.cursor()
|
| 211 |
+
cursor.execute("SELECT version();")
|
| 212 |
+
version = cursor.fetchone()
|
| 213 |
+
logger.info(f"PostgreSQL version: {version['version']}")
|
| 214 |
+
cursor.close()
|
| 215 |
+
close_connection(conn)
|
| 216 |
+
return True
|
| 217 |
+
except Exception as e:
|
| 218 |
+
logger.error(f"Connection test failed: {e}")
|
| 219 |
+
return False
|
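Typical use of these helpers from other modules (as `chat/tools.py` above does with `execute_write`) looks roughly like the sketch below; the query and values are illustrative only. Each helper opens and closes its own connection per call, so there is no pooling in this commit.

```python
# Sketch: read and write through the connection helpers.
from database.connection import execute_query, execute_write, test_connection

if test_connection():
    pending = execute_query(
        "SELECT order_id, customer_name FROM orders WHERE status = %s", ("pending",)
    )
    for row in pending:  # RealDictCursor yields dict-like rows
        print(row["order_id"], row["customer_name"])

    execute_write(
        "UPDATE orders SET status = %s WHERE order_id = %s",
        ("assigned", "ORD-20251114120000"),  # hypothetical order id
    )
```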
database/migrations/001_create_drivers_table.py
ADDED
|
@@ -0,0 +1,81 @@
| 1 |
+
"""
|
| 2 |
+
Migration: Create drivers table
|
| 3 |
+
Created: 2025-11-14
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import sys
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
|
| 9 |
+
# Add parent directory to path
|
| 10 |
+
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
| 11 |
+
|
| 12 |
+
from database.connection import get_db_connection
|
| 13 |
+
from database.schema import DRIVERS_SCHEMA
|
| 14 |
+
import logging
|
| 15 |
+
|
| 16 |
+
logging.basicConfig(level=logging.INFO)
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def up():
|
| 21 |
+
"""Apply migration - Create drivers table"""
|
| 22 |
+
logger.info("Starting migration: Create drivers table")
|
| 23 |
+
|
| 24 |
+
try:
|
| 25 |
+
conn = get_db_connection()
|
| 26 |
+
cursor = conn.cursor()
|
| 27 |
+
|
| 28 |
+
# Execute drivers schema
|
| 29 |
+
logger.info("Creating drivers table...")
|
| 30 |
+
cursor.execute(DRIVERS_SCHEMA)
|
| 31 |
+
|
| 32 |
+
conn.commit()
|
| 33 |
+
cursor.close()
|
| 34 |
+
conn.close()
|
| 35 |
+
|
| 36 |
+
logger.info("β
Migration completed successfully: Drivers table created")
|
| 37 |
+
return True
|
| 38 |
+
|
| 39 |
+
except Exception as e:
|
| 40 |
+
logger.error(f"β Migration failed: {e}")
|
| 41 |
+
if conn:
|
| 42 |
+
conn.rollback()
|
| 43 |
+
return False
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def down():
|
| 47 |
+
"""Rollback migration - Drop drivers table"""
|
| 48 |
+
logger.info("Rolling back migration: Drop drivers table")
|
| 49 |
+
|
| 50 |
+
try:
|
| 51 |
+
conn = get_db_connection()
|
| 52 |
+
cursor = conn.cursor()
|
| 53 |
+
|
| 54 |
+
logger.info("Dropping drivers table...")
|
| 55 |
+
cursor.execute("DROP TABLE IF EXISTS drivers CASCADE;")
|
| 56 |
+
|
| 57 |
+
conn.commit()
|
| 58 |
+
cursor.close()
|
| 59 |
+
conn.close()
|
| 60 |
+
|
| 61 |
+
logger.info("β
Rollback completed successfully: Drivers table dropped")
|
| 62 |
+
return True
|
| 63 |
+
|
| 64 |
+
except Exception as e:
|
| 65 |
+
logger.error(f"β Rollback failed: {e}")
|
| 66 |
+
if conn:
|
| 67 |
+
conn.rollback()
|
| 68 |
+
return False
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
if __name__ == "__main__":
|
| 72 |
+
import sys
|
| 73 |
+
|
| 74 |
+
if len(sys.argv) > 1 and sys.argv[1] == "down":
|
| 75 |
+
# Rollback
|
| 76 |
+
success = down()
|
| 77 |
+
else:
|
| 78 |
+
# Apply migration
|
| 79 |
+
success = up()
|
| 80 |
+
|
| 81 |
+
sys.exit(0 if success else 1)
|
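The `__main__` block above means the migration can be applied with `python database/migrations/001_create_drivers_table.py` and rolled back by passing `down`. It can also be driven from Python, as in this sketch (the dotted module name goes through `importlib` because it starts with a digit); this is an illustration, not part of the committed code.

```python
# Sketch: apply (or roll back) the drivers-table migration programmatically.
import importlib

migration = importlib.import_module("database.migrations.001_create_drivers_table")

if not migration.up():       # creates the drivers table via DRIVERS_SCHEMA
    raise SystemExit("migration failed")

# To undo it later:
# migration.down()           # drops the drivers table
```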
database/migrations/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
"""
Database migrations package
"""
database/schema.py
ADDED
|
@@ -0,0 +1,212 @@
| 1 |
+
"""
|
| 2 |
+
Database schema definitions for FleetMind
|
| 3 |
+
Contains PostgreSQL schema for all database tables
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
SCHEMA_SQL = """
|
| 7 |
+
-- ============================================
|
| 8 |
+
-- ORDERS TABLE
|
| 9 |
+
-- ============================================
|
| 10 |
+
|
| 11 |
+
CREATE TABLE IF NOT EXISTS orders (
|
| 12 |
+
order_id VARCHAR(50) PRIMARY KEY,
|
| 13 |
+
customer_name VARCHAR(255) NOT NULL,
|
| 14 |
+
customer_phone VARCHAR(20),
|
| 15 |
+
customer_email VARCHAR(255),
|
| 16 |
+
pickup_address TEXT,
|
| 17 |
+
pickup_lat DECIMAL(10, 8),
|
| 18 |
+
pickup_lng DECIMAL(11, 8),
|
| 19 |
+
delivery_address TEXT NOT NULL,
|
| 20 |
+
delivery_lat DECIMAL(10, 8),
|
| 21 |
+
delivery_lng DECIMAL(11, 8),
|
| 22 |
+
time_window_start TIMESTAMP,
|
| 23 |
+
time_window_end TIMESTAMP,
|
| 24 |
+
priority VARCHAR(20) CHECK(priority IN ('standard', 'express', 'urgent')) DEFAULT 'standard',
|
| 25 |
+
weight_kg DECIMAL(10, 2),
|
| 26 |
+
volume_m3 DECIMAL(10, 3),
|
| 27 |
+
special_instructions TEXT,
|
| 28 |
+
status VARCHAR(20) CHECK(status IN ('pending', 'assigned', 'in_transit', 'delivered', 'failed', 'cancelled')) DEFAULT 'pending',
|
| 29 |
+
assigned_driver_id VARCHAR(50),
|
| 30 |
+
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
| 31 |
+
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
| 32 |
+
delivered_at TIMESTAMP,
|
| 33 |
+
|
| 34 |
+
-- Additional fields for order management
|
| 35 |
+
order_value DECIMAL(10, 2),
|
| 36 |
+
payment_status VARCHAR(20) CHECK(payment_status IN ('pending', 'paid', 'cod')) DEFAULT 'pending',
|
| 37 |
+
requires_signature BOOLEAN DEFAULT FALSE,
|
| 38 |
+
is_fragile BOOLEAN DEFAULT FALSE,
|
| 39 |
+
requires_cold_storage BOOLEAN DEFAULT FALSE
|
| 40 |
+
);
|
| 41 |
+
|
| 42 |
+
-- ============================================
|
| 43 |
+
-- INDEXES FOR ORDERS TABLE
|
| 44 |
+
-- ============================================
|
| 45 |
+
|
| 46 |
+
CREATE INDEX IF NOT EXISTS idx_orders_status ON orders(status);
|
| 47 |
+
CREATE INDEX IF NOT EXISTS idx_orders_priority ON orders(priority);
|
| 48 |
+
CREATE INDEX IF NOT EXISTS idx_orders_assigned_driver ON orders(assigned_driver_id);
|
| 49 |
+
CREATE INDEX IF NOT EXISTS idx_orders_created_at ON orders(created_at);
|
| 50 |
+
CREATE INDEX IF NOT EXISTS idx_orders_time_window ON orders(time_window_start, time_window_end);
|
| 51 |
+
|
| 52 |
+
-- ============================================
|
| 53 |
+
-- TRIGGER TO UPDATE updated_at TIMESTAMP
|
| 54 |
+
-- ============================================
|
| 55 |
+
|
| 56 |
+
CREATE OR REPLACE FUNCTION update_updated_at_column()
|
| 57 |
+
RETURNS TRIGGER AS $$
|
| 58 |
+
BEGIN
|
| 59 |
+
NEW.updated_at = CURRENT_TIMESTAMP;
|
| 60 |
+
RETURN NEW;
|
| 61 |
+
END;
|
| 62 |
+
$$ language 'plpgsql';
|
| 63 |
+
|
| 64 |
+
DROP TRIGGER IF EXISTS update_orders_timestamp ON orders;
|
| 65 |
+
CREATE TRIGGER update_orders_timestamp
|
| 66 |
+
BEFORE UPDATE ON orders
|
| 67 |
+
FOR EACH ROW
|
| 68 |
+
EXECUTE FUNCTION update_updated_at_column();
|
| 69 |
+
"""
|
| 70 |
+
|
| 71 |
+
# Additional schema components can be added here as needed
|
| 72 |
+
DRIVERS_SCHEMA = """
|
| 73 |
+
-- ============================================
|
| 74 |
+
-- DRIVERS TABLE
|
| 75 |
+
-- ============================================
|
| 76 |
+
|
| 77 |
+
CREATE TABLE IF NOT EXISTS drivers (
|
| 78 |
+
driver_id VARCHAR(50) PRIMARY KEY,
|
| 79 |
+
name VARCHAR(255) NOT NULL,
|
| 80 |
+
phone VARCHAR(20),
|
| 81 |
+
email VARCHAR(255),
|
| 82 |
+
current_lat DECIMAL(10, 8),
|
| 83 |
+
current_lng DECIMAL(11, 8),
|
| 84 |
+
last_location_update TIMESTAMP,
|
| 85 |
+
status VARCHAR(20) CHECK(status IN ('active', 'busy', 'offline', 'unavailable')) DEFAULT 'active',
|
| 86 |
+
vehicle_type VARCHAR(50),
|
| 87 |
+
vehicle_plate VARCHAR(20),
|
| 88 |
+
capacity_kg DECIMAL(10, 2),
|
| 89 |
+
capacity_m3 DECIMAL(10, 3),
|
| 90 |
+
skills JSONB, -- JSON array: ["medical_certified", "refrigerated"]
|
| 91 |
+
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
| 92 |
+
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
| 93 |
+
);
|
| 94 |
+
|
| 95 |
+
CREATE INDEX IF NOT EXISTS idx_drivers_status ON drivers(status);
|
| 96 |
+
|
| 97 |
+
DROP TRIGGER IF EXISTS update_drivers_timestamp ON drivers;
|
| 98 |
+
CREATE TRIGGER update_drivers_timestamp
|
| 99 |
+
BEFORE UPDATE ON drivers
|
| 100 |
+
FOR EACH ROW
|
| 101 |
+
EXECUTE FUNCTION update_updated_at_column();
|
| 102 |
+
"""
|
| 103 |
+
|
| 104 |
+
ASSIGNMENTS_SCHEMA = """
|
| 105 |
+
-- ============================================
|
| 106 |
+
-- ASSIGNMENTS TABLE
|
| 107 |
+
-- ============================================
|
| 108 |
+
|
| 109 |
+
CREATE TABLE IF NOT EXISTS assignments (
|
| 110 |
+
assignment_id VARCHAR(50) PRIMARY KEY,
|
| 111 |
+
order_id VARCHAR(50) NOT NULL,
|
| 112 |
+
driver_id VARCHAR(50) NOT NULL,
|
| 113 |
+
assigned_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
| 114 |
+
sequence_number INTEGER, -- Order in driver's route
|
| 115 |
+
estimated_arrival TIMESTAMP,
|
| 116 |
+
actual_arrival TIMESTAMP,
|
| 117 |
+
estimated_distance_km DECIMAL(10, 2),
|
| 118 |
+
actual_distance_km DECIMAL(10, 2),
|
| 119 |
+
status VARCHAR(20) CHECK(status IN ('assigned', 'in_progress', 'completed', 'failed')) DEFAULT 'assigned',
|
| 120 |
+
notes TEXT,
|
| 121 |
+
FOREIGN KEY (order_id) REFERENCES orders(order_id) ON DELETE CASCADE,
|
| 122 |
+
FOREIGN KEY (driver_id) REFERENCES drivers(driver_id) ON DELETE CASCADE
|
| 123 |
+
);
|
| 124 |
+
|
| 125 |
+
CREATE INDEX IF NOT EXISTS idx_assignments_driver ON assignments(driver_id);
|
| 126 |
+
CREATE INDEX IF NOT EXISTS idx_assignments_order ON assignments(order_id);
|
| 127 |
+
CREATE INDEX IF NOT EXISTS idx_assignments_status ON assignments(status);
|
| 128 |
+
"""
|
| 129 |
+
|
| 130 |
+
EXCEPTIONS_SCHEMA = """
|
| 131 |
+
-- ============================================
|
| 132 |
+
-- EXCEPTIONS TABLE
|
| 133 |
+
-- ============================================
|
| 134 |
+
|
| 135 |
+
CREATE TABLE IF NOT EXISTS exceptions (
|
| 136 |
+
exception_id VARCHAR(50) PRIMARY KEY,
|
| 137 |
+
exception_type VARCHAR(50) CHECK(exception_type IN ('driver_breakdown', 'traffic_delay', 'weather', 'customer_unavailable', 'other')),
|
| 138 |
+
severity VARCHAR(20) CHECK(severity IN ('low', 'medium', 'high', 'critical')),
|
| 139 |
+
description TEXT,
|
| 140 |
+
affected_orders JSONB, -- JSON array of order IDs
|
| 141 |
+
affected_drivers JSONB, -- JSON array of driver IDs
|
| 142 |
+
detected_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
| 143 |
+
resolved_at TIMESTAMP,
|
| 144 |
+
resolution_method JSONB, -- JSON: {agent: "...", actions: [...]}
|
| 145 |
+
coordinator_override BOOLEAN DEFAULT FALSE,
|
| 146 |
+
resolution_time_seconds INTEGER
|
| 147 |
+
);
|
| 148 |
+
|
| 149 |
+
CREATE INDEX IF NOT EXISTS idx_exceptions_type ON exceptions(exception_type);
|
| 150 |
+
CREATE INDEX IF NOT EXISTS idx_exceptions_severity ON exceptions(severity);
|
| 151 |
+
CREATE INDEX IF NOT EXISTS idx_exceptions_detected_at ON exceptions(detected_at);
|
| 152 |
+
"""
|
| 153 |
+
|
| 154 |
+
AGENT_DECISIONS_SCHEMA = """
|
| 155 |
+
-- ============================================
|
| 156 |
+
-- AGENT DECISIONS TABLE (For Agent Learning)
|
| 157 |
+
-- ============================================
|
| 158 |
+
|
| 159 |
+
CREATE TABLE IF NOT EXISTS agent_decisions (
|
| 160 |
+
decision_id VARCHAR(50) PRIMARY KEY,
|
| 161 |
+
session_id VARCHAR(50),
|
| 162 |
+
task_type VARCHAR(50),
|
| 163 |
+
situation_context JSONB, -- JSON
|
| 164 |
+
options_generated JSONB, -- JSON array
|
| 165 |
+
selected_option JSONB, -- JSON
|
| 166 |
+
reasoning TEXT,
|
| 167 |
+
approval_status VARCHAR(20) CHECK(approval_status IN ('approved', 'rejected', 'modified')),
|
| 168 |
+
coordinator_feedback TEXT,
|
| 169 |
+
outcome_success BOOLEAN,
|
| 170 |
+
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
| 171 |
+
);
|
| 172 |
+
|
| 173 |
+
CREATE INDEX IF NOT EXISTS idx_decisions_session ON agent_decisions(session_id);
|
| 174 |
+
CREATE INDEX IF NOT EXISTS idx_decisions_task_type ON agent_decisions(task_type);
|
| 175 |
+
"""
|
| 176 |
+
|
| 177 |
+
METRICS_SCHEMA = """
|
| 178 |
+
-- ============================================
|
| 179 |
+
-- PERFORMANCE METRICS TABLE
|
| 180 |
+
-- ============================================
|
| 181 |
+
|
| 182 |
+
CREATE TABLE IF NOT EXISTS metrics (
|
| 183 |
+
metric_id VARCHAR(50) PRIMARY KEY,
|
| 184 |
+
date DATE,
|
| 185 |
+
total_orders INTEGER,
|
| 186 |
+
on_time_deliveries INTEGER,
|
| 187 |
+
failed_deliveries INTEGER,
|
| 188 |
+
exceptions_count INTEGER,
|
| 189 |
+
avg_resolution_time_seconds DECIMAL(10, 2),
|
| 190 |
+
coordinator_time_saved_minutes INTEGER,
|
| 191 |
+
fuel_cost_savings DECIMAL(10, 2)
|
| 192 |
+
);
|
| 193 |
+
|
| 194 |
+
CREATE INDEX IF NOT EXISTS idx_metrics_date ON metrics(date);
|
| 195 |
+
"""
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
def get_full_schema() -> str:
|
| 199 |
+
"""
|
| 200 |
+
Get the complete database schema including all tables.
|
| 201 |
+
|
| 202 |
+
Returns:
|
| 203 |
+
str: Complete SQL schema
|
| 204 |
+
"""
|
| 205 |
+
return (
|
| 206 |
+
SCHEMA_SQL + "\n" +
|
| 207 |
+
DRIVERS_SCHEMA + "\n" +
|
| 208 |
+
ASSIGNMENTS_SCHEMA + "\n" +
|
| 209 |
+
EXCEPTIONS_SCHEMA + "\n" +
|
| 210 |
+
AGENT_DECISIONS_SCHEMA + "\n" +
|
| 211 |
+
METRICS_SCHEMA
|
| 212 |
+
)
|
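Note that `init_database()` in `database/connection.py` executes only `SCHEMA_SQL` (the orders table) by default, while `get_full_schema()` concatenates every table definition above. A one-off bootstrap that provisions all tables could therefore look like this sketch (illustrative, not part of the commit):

```python
# Sketch: create every FleetMind table using the composed schema.
from database.connection import get_db_connection, close_connection
from database.schema import get_full_schema

conn = get_db_connection()
try:
    cursor = conn.cursor()
    cursor.execute(get_full_schema())  # orders, drivers, assignments, exceptions, decisions, metrics
    conn.commit()
    cursor.close()
finally:
    close_connection(conn)
```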
flow.md
ADDED
|
@@ -0,0 +1,828 @@
| 1 |
+
Complete Function Flow
|
| 2 |
+
|
| 3 |
+
Input: "Create an order for John Doe at 123 Main St, San
|
| 4 |
+
Francisco"
|
| 5 |
+
|
| 6 |
+
Let me trace every single function call with this exact example!
|
| 7 |
+
|
| 8 |
+
---
|
| 9 |
+
π FUNCTION CALL SEQUENCE:
|
| 10 |
+
|
| 11 |
+
STEP 1: User Clicks Send Button
|
| 12 |
+
|
| 13 |
+
FILE: ui/app.py
|
| 14 |
+
LINE: 448-452
|
| 15 |
+
|
| 16 |
+
User clicks "Send" button in Gradio UI
|
| 17 |
+
β
|
| 18 |
+
FUNCTION CALLED: send_message(message, conv_state)
|
| 19 |
+
message = "Create an order for John Doe at 123 Main St, San
|
| 20 |
+
Francisco"
|
| 21 |
+
conv_state = ConversationManager object
|
| 22 |
+
|
| 23 |
+
Function Code:
|
| 24 |
+
def send_message(message, conv_state):
|
| 25 |
+
"""Handle send button click"""
|
| 26 |
+
chat_history, tools, new_state = handle_chat_message(message,
|
| 27 |
+
conv_state)
|
| 28 |
+
# β
|
| 29 |
+
# CALLS THIS NEXT
|
| 30 |
+
return chat_history, tools, new_state, ""
|
| 31 |
+
|
| 32 |
+
---
|
| 33 |
+
STEP 2: handle_chat_message()
|
| 34 |
+
|
| 35 |
+
FILE: ui/app.py
|
| 36 |
+
LINE: 223-241
|
| 37 |
+
|
| 38 |
+
FUNCTION: handle_chat_message(message, conversation_state)
|
| 39 |
+
message = "Create an order for John Doe at 123 Main St, San
|
| 40 |
+
Francisco"
|
| 41 |
+
conversation_state = ConversationManager object
|
| 42 |
+
|
| 43 |
+
Function Code:
|
| 44 |
+
def handle_chat_message(message, conversation_state):
|
| 45 |
+
if not message.strip():
|
| 46 |
+
return ...
|
| 47 |
+
|
| 48 |
+
# Process message through chat engine
|
| 49 |
+
response, tool_calls = chat_engine.process_message(message,
|
| 50 |
+
conversation_state)
|
| 51 |
+
# β
|
| 52 |
+
# CALLS THIS NEXT
|
| 53 |
+
|
| 54 |
+
# Return updated UI
|
| 55 |
+
return conversation_state.get_formatted_history(),
|
| 56 |
+
conversation_state.get_tool_calls(), conversation_state
|
| 57 |
+
|
| 58 |
+
---
|
| 59 |
+
STEP 3: chat_engine.process_message()
|
| 60 |
+
|
| 61 |
+
FILE: chat/chat_engine.py
|
| 62 |
+
LINE: 58-73
|
| 63 |
+
|
| 64 |
+
FUNCTION: ChatEngine.process_message(user_message, conversation)
|
| 65 |
+
user_message = "Create an order for John Doe at 123 Main St,
|
| 66 |
+
San Francisco"
|
| 67 |
+
conversation = ConversationManager object
|
| 68 |
+
|
| 69 |
+
Function Code:
|
| 70 |
+
def process_message(self, user_message, conversation):
|
| 71 |
+
"""Process user message and return AI response"""
|
| 72 |
+
return self.provider.process_message(user_message,
|
| 73 |
+
conversation)
|
| 74 |
+
# β
|
| 75 |
+
# self.provider = GeminiProvider (from chat_engine.py:26)
|
| 76 |
+
# CALLS GeminiProvider.process_message() NEXT
|
| 77 |
+
|
| 78 |
+
---
|
| 79 |
+
STEP 4: GeminiProvider.process_message()
|
| 80 |
+
|
| 81 |
+
FILE: chat/providers/gemini_provider.py
|
| 82 |
+
LINE: 173-212
|
| 83 |
+
|
| 84 |
+
FUNCTION: GeminiProvider.process_message(user_message,
|
| 85 |
+
conversation)
|
| 86 |
+
user_message = "Create an order for John Doe at 123 Main St,
|
| 87 |
+
San Francisco"
|
| 88 |
+
conversation = ConversationManager object
|
| 89 |
+
|
| 90 |
+
Function Code:
|
| 91 |
+
def process_message(self, user_message, conversation):
|
| 92 |
+
"""Process user message with Gemini"""
|
| 93 |
+
if not self.api_available:
|
| 94 |
+
return self._handle_no_api(), []
|
| 95 |
+
|
| 96 |
+
# Lazy initialization on first use
|
| 97 |
+
self._ensure_initialized() # β CALLS THIS if not initialized
|
| 98 |
+
|
| 99 |
+
if not self._initialized:
|
| 100 |
+
return "β οΈ Failed to initialize...", []
|
| 101 |
+
|
| 102 |
+
try:
|
| 103 |
+
# Build conversation history for Gemini
|
| 104 |
+
chat =
|
| 105 |
+
self.model.start_chat(history=self._convert_history(conversation))
|
| 106 |
+
# β
|
| 107 |
+
# CALLS
|
| 108 |
+
_convert_history()
|
| 109 |
+
|
| 110 |
+
# Send message and get response
|
| 111 |
+
response = chat.send_message(user_message,
|
| 112 |
+
safety_settings={...})
|
| 113 |
+
# β
|
| 114 |
+
# π API CALL TO GOOGLE GEMINI
|
| 115 |
+
# Sends: "Create an order for John Doe at 123
|
| 116 |
+
Main St, San Francisco"
|
| 117 |
+
|
| 118 |
+
# Add user message to conversation
|
| 119 |
+
conversation.add_message("user", user_message)
|
| 120 |
+
|
| 121 |
+
# Process response and handle function calls
|
| 122 |
+
return self._process_response(response, conversation,
|
| 123 |
+
chat)
|
| 124 |
+
# β
|
| 125 |
+
# CALLS THIS NEXT
|
| 126 |
+
|
| 127 |
+
---
|
| 128 |
+
STEP 5: Gemini API Processes Request
|
| 129 |
+
|
| 130 |
+
π GOOGLE GEMINI API (External)
|
| 131 |
+
|
| 132 |
+
RECEIVES:
|
| 133 |
+
- System Prompt: "You are an AI assistant for FleetMind..."
|
| 134 |
+
- User Message: "Create an order for John Doe at 123 Main St, San
|
| 135 |
+
Francisco"
|
| 136 |
+
- Available Tools: [geocode_address, create_order]
|
| 137 |
+
|
| 138 |
+
AI ANALYZES:
|
| 139 |
+
"User wants to create an order. I have:
|
| 140 |
+
β
Customer Name: John Doe
|
| 141 |
+
β
Address: 123 Main St, San Francisco
|
| 142 |
+
β GPS Coordinates: Missing!
|
| 143 |
+
|
| 144 |
+
DECISION: Call geocode_address tool first to get coordinates."
|
| 145 |
+
|
| 146 |
+
RETURNS TO CODE:
|
| 147 |
+
response = {
|
| 148 |
+
candidates: [{
|
| 149 |
+
content: {
|
| 150 |
+
parts: [{
|
| 151 |
+
function_call: {
|
| 152 |
+
name: "geocode_address",
|
| 153 |
+
args: {
|
| 154 |
+
"address": "123 Main St, San Francisco"
|
| 155 |
+
}
|
| 156 |
+
}
|
| 157 |
+
}]
|
| 158 |
+
}
|
| 159 |
+
}]
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
---
|
| 163 |
+
STEP 6: _process_response() - Detects Function Call
|
| 164 |
+
|
| 165 |
+
FILE: chat/providers/gemini_provider.py
|
| 166 |
+
LINE: 226-393
|
| 167 |
+
|
| 168 |
+
FUNCTION: _process_response(response, conversation, chat)
|
| 169 |
+
response = Response from Gemini with function_call
|
| 170 |
+
conversation = ConversationManager object
|
| 171 |
+
chat = Gemini chat session
|
| 172 |
+
|
| 173 |
+
Function Code:
|
| 174 |
+
def _process_response(self, response, conversation, chat):
|
| 175 |
+
"""Process Gemini's response and handle function calls"""
|
| 176 |
+
tool_calls_made = []
|
| 177 |
+
|
| 178 |
+
try:
|
| 179 |
+
# Check ALL parts for function calls
|
| 180 |
+
parts = response.candidates[0].content.parts
|
| 181 |
+
logger.info(f"Processing response with {len(parts)}
|
| 182 |
+
part(s)")
|
| 183 |
+
# β
|
| 184 |
+
# LOGS: "Processing response with 1 part(s)"
|
| 185 |
+
|
| 186 |
+
for part in parts:
|
| 187 |
+
if hasattr(part, 'function_call'):
|
| 188 |
+
fc = part.function_call
|
| 189 |
+
if fc and hasattr(fc, 'name') and fc.name:
|
| 190 |
+
has_function_call = True
|
| 191 |
+
logger.info(f"Detected function call:
|
| 192 |
+
{fc.name}")
|
| 193 |
+
# β
|
| 194 |
+
# LOGS: "Detected function call:
|
| 195 |
+
geocode_address"
|
| 196 |
+
break
|
| 197 |
+
|
| 198 |
+
if has_function_call:
|
| 199 |
+
# Handle function calls (potentially multiple in
|
| 200 |
+
sequence)
|
| 201 |
+
current_response = response
|
| 202 |
+
max_iterations = 10
|
| 203 |
+
|
| 204 |
+
for iteration in range(max_iterations): # β LOOP
|
| 205 |
+
STARTS
|
| 206 |
+
# Extract function call details
|
| 207 |
+
first_part =
|
| 208 |
+
current_response.candidates[0].content.parts[0]
|
| 209 |
+
function_call = first_part.function_call
|
| 210 |
+
function_name = function_call.name #
|
| 211 |
+
"geocode_address"
|
| 212 |
+
function_args = dict(function_call.args) #
|
| 213 |
+
{"address": "123 Main St, San Francisco"}
|
| 214 |
+
|
| 215 |
+
logger.info(f"Gemini executing function:
|
| 216 |
+
{function_name} (iteration {iteration + 1})")
|
| 217 |
+
# β
|
| 218 |
+
# LOGS: "Gemini executing function:
|
| 219 |
+
geocode_address (iteration 1)"
|
| 220 |
+
|
| 221 |
+
# Execute the tool
|
| 222 |
+
tool_result = execute_tool(function_name,
|
| 223 |
+
function_args)
|
| 224 |
+
# β
|
| 225 |
+
# CALLS execute_tool() NEXT
|
| 226 |
+
|
| 227 |
+
---
|
| 228 |
+
STEP 7: execute_tool() - Routes to Handler
|
| 229 |
+
|
| 230 |
+
FILE: chat/tools.py
|
| 231 |
+
LINE: 92-118
|
| 232 |
+
|
| 233 |
+
FUNCTION: execute_tool(tool_name, tool_input)
|
| 234 |
+
tool_name = "geocode_address"
|
| 235 |
+
tool_input = {"address": "123 Main St, San Francisco"}
|
| 236 |
+
|
| 237 |
+
Function Code:
|
| 238 |
+
def execute_tool(tool_name, tool_input):
|
| 239 |
+
"""Route tool execution to appropriate handler"""
|
| 240 |
+
try:
|
| 241 |
+
if tool_name == "geocode_address":
|
| 242 |
+
return handle_geocode_address(tool_input)
|
| 243 |
+
# β
|
| 244 |
+
# CALLS THIS NEXT
|
| 245 |
+
elif tool_name == "create_order":
|
| 246 |
+
return handle_create_order(tool_input)
|
| 247 |
+
else:
|
| 248 |
+
return {"success": False, "error": f"Unknown tool:
|
| 249 |
+
{tool_name}"}
|
| 250 |
+
except Exception as e:
|
| 251 |
+
logger.error(f"Tool execution error ({tool_name}): {e}")
|
| 252 |
+
return {"success": False, "error": str(e)}
|
| 253 |
+
|
| 254 |
+
---
|
| 255 |
+
STEP 8: handle_geocode_address()
|
| 256 |
+
|
| 257 |
+
FILE: chat/tools.py
|
| 258 |
+
LINE: 121-150
|
| 259 |
+
|
| 260 |
+
FUNCTION: handle_geocode_address(tool_input)
|
| 261 |
+
tool_input = {"address": "123 Main St, San Francisco"}
|
| 262 |
+
|
| 263 |
+
Function Code:
|
| 264 |
+
def handle_geocode_address(tool_input):
|
| 265 |
+
"""Execute geocoding tool"""
|
| 266 |
+
address = tool_input.get("address", "") # "123 Main St, San
|
| 267 |
+
Francisco"
|
| 268 |
+
|
| 269 |
+
if not address:
|
| 270 |
+
return {"success": False, "error": "Address is required"}
|
| 271 |
+
|
| 272 |
+
logger.info(f"Geocoding address: {address}")
|
| 273 |
+
# β
|
| 274 |
+
# LOGS: "Geocoding address: 123 Main St, San
|
| 275 |
+
Francisco"
|
| 276 |
+
|
| 277 |
+
result = geocoding_service.geocode(address)
|
| 278 |
+
# β
|
| 279 |
+
# CALLS geocoding_service.geocode() NEXT
|
| 280 |
+
|
| 281 |
+
return {
|
| 282 |
+
"success": True,
|
| 283 |
+
"latitude": result["lat"],
|
| 284 |
+
"longitude": result["lng"],
|
| 285 |
+
"formatted_address": result["formatted_address"],
|
| 286 |
+
"confidence": result["confidence"],
|
| 287 |
+
"message": f"Address geocoded successfully
|
| 288 |
+
({result['confidence']})"
|
| 289 |
+
}
|
| 290 |
+
|
| 291 |
+
---
|
| 292 |
+
STEP 9: GeocodingService.geocode()
|
| 293 |
+
|
| 294 |
+
FILE: chat/geocoding.py
|
| 295 |
+
LINE: 28-65
|
| 296 |
+
|
| 297 |
+
FUNCTION: GeocodingService.geocode(address)
|
| 298 |
+
address = "123 Main St, San Francisco"
|
| 299 |
+
|
| 300 |
+
Function Code:
|
| 301 |
+
def geocode(self, address):
|
| 302 |
+
"""Geocode an address to coordinates"""
|
| 303 |
+
if not address:
|
| 304 |
+
return self._error_response("Address is required")
|
| 305 |
+
|
| 306 |
+
# Use mock or real API
|
| 307 |
+
if self.use_mock: # True (no HERE_API_KEY configured)
|
| 308 |
+
return self._geocode_mock(address)
|
| 309 |
+
# β
|
| 310 |
+
# CALLS THIS NEXT
|
| 311 |
+
else:
|
| 312 |
+
return self._geocode_here(address)
|
| 313 |
+
|
| 314 |
+
---
|
| 315 |
+
STEP 10: _geocode_mock() - Returns Coordinates
|
| 316 |
+
|
| 317 |
+
FILE: chat/geocoding.py
|
| 318 |
+
LINE: 52-70
|
| 319 |
+
|
| 320 |
+
FUNCTION: _geocode_mock(address)
|
| 321 |
+
address = "123 Main St, San Francisco"
|
| 322 |
+
|
| 323 |
+
Function Code:
|
| 324 |
+
def _geocode_mock(self, address):
|
| 325 |
+
"""Mock geocoding using city detection"""
|
| 326 |
+
address_lower = address.lower()
|
| 327 |
+
|
| 328 |
+
# Try to detect city in address
|
| 329 |
+
for city_name, (lat, lng) in CITY_COORDINATES.items():
|
| 330 |
+
if city_name in address_lower:
|
| 331 |
+
logger.info(f"Mock geocoding detected city:
|
| 332 |
+
{city_name}")
|
| 333 |
+
# β
|
| 334 |
+
# LOGS: "Mock geocoding detected city: san
|
| 335 |
+
francisco"
|
| 336 |
+
|
| 337 |
+
return {
|
| 338 |
+
"lat": lat, # 37.7749
|
| 339 |
+
"lng": lng, # -122.4194
|
| 340 |
+
"formatted_address": address,
|
| 341 |
+
"confidence": "mock"
|
| 342 |
+
}
|
| 343 |
+
|
| 344 |
+
# Default to San Francisco if no city detected
|
| 345 |
+
return {
|
| 346 |
+
"lat": 37.7749,
|
| 347 |
+
"lng": -122.4194,
|
| 348 |
+
"formatted_address": address,
|
| 349 |
+
"confidence": "mock"
|
| 350 |
+
}
|
| 351 |
+
|
| 352 |
+
RETURNS:
|
| 353 |
+
{
|
| 354 |
+
"lat": 37.7749,
|
| 355 |
+
"lng": -122.4194,
|
| 356 |
+
"formatted_address": "123 Main St, San Francisco",
|
| 357 |
+
"confidence": "mock"
|
| 358 |
+
}
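The `CITY_COORDINATES` table used here is defined near the top of `chat/geocoding.py` and is not shown in this trace. As an illustration only, it is a city-name-to-(lat, lng) mapping along these lines; apart from the San Francisco entry confirmed above, the entries are hypothetical:

```python
# Hypothetical illustration of the lookup table behind _geocode_mock();
# only the "san francisco" coordinates are confirmed by the trace above.
CITY_COORDINATES = {
    "san francisco": (37.7749, -122.4194),
    "new york": (40.7128, -74.0060),
    "los angeles": (34.0522, -118.2437),
}
```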
|
| 359 |
+
|
| 360 |
+
---
|
| 361 |
+
STEP 11: Back to handle_geocode_address()
|
| 362 |
+
|
| 363 |
+
FILE: chat/tools.py
|
| 364 |
+
LINE: 141-150
|
| 365 |
+
|
| 366 |
+
result = {
|
| 367 |
+
"lat": 37.7749,
|
| 368 |
+
"lng": -122.4194,
|
| 369 |
+
"formatted_address": "123 Main St, San Francisco",
|
| 370 |
+
"confidence": "mock"
|
| 371 |
+
}
|
| 372 |
+
|
| 373 |
+
RETURNS:
|
| 374 |
+
{
|
| 375 |
+
"success": True,
|
| 376 |
+
"latitude": 37.7749,
|
| 377 |
+
"longitude": -122.4194,
|
| 378 |
+
"formatted_address": "123 Main St, San Francisco",
|
| 379 |
+
"confidence": "mock",
|
| 380 |
+
"message": "Address geocoded successfully (mock)"
|
| 381 |
+
}
|
| 382 |
+
|
| 383 |
+
---
|
| 384 |
+
STEP 12: Back to _process_response() - Tool Result Received
|
| 385 |
+
|
| 386 |
+
FILE: chat/providers/gemini_provider.py
|
| 387 |
+
LINE: 285-310
|
| 388 |
+
|
| 389 |
+
tool_result = {
|
| 390 |
+
"success": True,
|
| 391 |
+
"latitude": 37.7749,
|
| 392 |
+
"longitude": -122.4194,
|
| 393 |
+
"formatted_address": "123 Main St, San Francisco",
|
| 394 |
+
"confidence": "mock",
|
| 395 |
+
"message": "Address geocoded successfully (mock)"
|
| 396 |
+
}
|
| 397 |
+
|
| 398 |
+
# Track for transparency
|
| 399 |
+
tool_calls_made.append({
|
| 400 |
+
"tool": "geocode_address",
|
| 401 |
+
"input": {"address": "123 Main St, San Francisco"},
|
| 402 |
+
"result": tool_result
|
| 403 |
+
})
|
| 404 |
+
|
| 405 |
+
conversation.add_tool_result("geocode_address", function_args,
|
| 406 |
+
tool_result)
|
| 407 |
+
|
| 408 |
+
# Send function result back to Gemini
|
| 409 |
+
current_response = chat.send_message(
|
| 410 |
+
genai.protos.Content(
|
| 411 |
+
parts=[genai.protos.Part(
|
| 412 |
+
function_response=genai.protos.FunctionResponse(
|
| 413 |
+
name="geocode_address",
|
| 414 |
+
response={"result": tool_result}
|
| 415 |
+
)
|
| 416 |
+
)]
|
| 417 |
+
)
|
| 418 |
+
)
|
| 419 |
+
# β
|
| 420 |
+
# π API CALL TO GEMINI WITH GEOCODING RESULT
|
| 421 |
+
|
| 422 |
+
---
|
| 423 |
+
STEP 13: Gemini Receives Geocoding Result
|
| 424 |
+
|
| 425 |
+
π GOOGLE GEMINI API (External)
|
| 426 |
+
|
| 427 |
+
RECEIVES:
|
| 428 |
+
- Function: geocode_address
|
| 429 |
+
- Result: {
|
| 430 |
+
"success": True,
|
| 431 |
+
"latitude": 37.7749,
|
| 432 |
+
"longitude": -122.4194
|
| 433 |
+
}
|
| 434 |
+
|
| 435 |
+
AI ANALYZES:
|
| 436 |
+
"Great! I now have GPS coordinates:
|
| 437 |
+
β
Customer Name: John Doe
|
| 438 |
+
β
Address: 123 Main St, San Francisco
|
| 439 |
+
β
Latitude: 37.7749
|
| 440 |
+
β
Longitude: -122.4194
|
| 441 |
+
|
| 442 |
+
DECISION: Now I can create the order in the database!
|
| 443 |
+
Call create_order tool."
|
| 444 |
+
|
| 445 |
+
RETURNS TO CODE:
|
| 446 |
+
response = {
|
| 447 |
+
candidates: [{
|
| 448 |
+
content: {
|
| 449 |
+
parts: [{
|
| 450 |
+
function_call: {
|
| 451 |
+
name: "create_order",
|
| 452 |
+
args: {
|
| 453 |
+
"customer_name": "John Doe",
|
| 454 |
+
"delivery_address": "123 Main St, San
|
| 455 |
+
Francisco",
|
| 456 |
+
"delivery_lat": 37.7749,
|
| 457 |
+
"delivery_lng": -122.4194,
|
| 458 |
+
"priority": "standard"
|
| 459 |
+
}
|
| 460 |
+
}
|
| 461 |
+
}]
|
| 462 |
+
}
|
| 463 |
+
}]
|
| 464 |
+
}
|
| 465 |
+
|
| 466 |
+
---
|
| 467 |
+
STEP 14: Loop Continues - Detects create_order
|
| 468 |
+
|
| 469 |
+
FILE: chat/providers/gemini_provider.py
|
| 470 |
+
LINE: 252-285
|
| 471 |
+
|
| 472 |
+
# Still in the for loop (iteration 2)
|
| 473 |
+
first_part = current_response.candidates[0].content.parts[0]
|
| 474 |
+
has_fc = True # Another function call detected
|
| 475 |
+
|
| 476 |
+
function_call = first_part.function_call
|
| 477 |
+
function_name = function_call.name # "create_order"
|
| 478 |
+
function_args = dict(function_call.args) # {customer_name,
|
| 479 |
+
address, lat, lng...}
|
| 480 |
+
|
| 481 |
+
logger.info(f"Gemini executing function: {function_name}
|
| 482 |
+
(iteration 2)")
|
| 483 |
+
# β
|
| 484 |
+
# LOGS: "Gemini executing function: create_order
|
| 485 |
+
(iteration 2)"
|
| 486 |
+
|
| 487 |
+
# Execute the tool
|
| 488 |
+
tool_result = execute_tool(function_name, function_args)
|
| 489 |
+
# β
|
| 490 |
+
# CALLS execute_tool() AGAIN
|
| 491 |
+
|
| 492 |
+
---
|
| 493 |
+
STEP 15: execute_tool() - Routes to create_order
|
| 494 |
+
|
| 495 |
+
FILE: chat/tools.py
|
| 496 |
+
LINE: 92-118
|
| 497 |
+
|
| 498 |
+
FUNCTION: execute_tool(tool_name, tool_input)
|
| 499 |
+
tool_name = "create_order"
|
| 500 |
+
tool_input = {
|
| 501 |
+
"customer_name": "John Doe",
|
| 502 |
+
"delivery_address": "123 Main St, San Francisco",
|
| 503 |
+
"delivery_lat": 37.7749,
|
| 504 |
+
"delivery_lng": -122.4194,
|
| 505 |
+
"priority": "standard"
|
| 506 |
+
}
|
| 507 |
+
|
| 508 |
+
Function Code:
|
| 509 |
+
def execute_tool(tool_name, tool_input):
|
| 510 |
+
try:
|
| 511 |
+
if tool_name == "geocode_address":
|
| 512 |
+
return handle_geocode_address(tool_input)
|
| 513 |
+
elif tool_name == "create_order":
|
| 514 |
+
return handle_create_order(tool_input)
|
| 515 |
+
# β
|
| 516 |
+
# CALLS THIS NEXT
|
| 517 |
+
|
| 518 |
+
---
|
| 519 |
+
STEP 16: handle_create_order()
|
| 520 |
+
|
| 521 |
+
FILE: chat/tools.py
|
| 522 |
+
LINE: 153-242
|
| 523 |
+
|
| 524 |
+
FUNCTION: handle_create_order(tool_input)
|
| 525 |
+
tool_input = {
|
| 526 |
+
"customer_name": "John Doe",
|
| 527 |
+
"delivery_address": "123 Main St, San Francisco",
|
| 528 |
+
"delivery_lat": 37.7749,
|
| 529 |
+
"delivery_lng": -122.4194,
|
| 530 |
+
"priority": "standard"
|
| 531 |
+
}
|
| 532 |
+
|
| 533 |
+
Function Code:
|
| 534 |
+
def handle_create_order(tool_input):
|
| 535 |
+
"""Execute order creation tool"""
|
| 536 |
+
|
| 537 |
+
# Extract fields with defaults
|
| 538 |
+
customer_name = tool_input.get("customer_name") # "John Doe"
|
| 539 |
+
customer_phone = tool_input.get("customer_phone") # None
|
| 540 |
+
customer_email = tool_input.get("customer_email") # None
|
| 541 |
+
delivery_address = tool_input.get("delivery_address") # "123
|
| 542 |
+
Main St, San Francisco"
|
| 543 |
+
delivery_lat = tool_input.get("delivery_lat") # 37.7749
|
| 544 |
+
delivery_lng = tool_input.get("delivery_lng") # -122.4194
|
| 545 |
+
priority = tool_input.get("priority", "standard") #
|
| 546 |
+
"standard"
|
| 547 |
+
special_instructions = tool_input.get("special_instructions")
|
| 548 |
+
# None
|
| 549 |
+
weight_kg = tool_input.get("weight_kg", 5.0) # 5.0
|
| 550 |
+
|
| 551 |
+
# Validate required fields
|
| 552 |
+
if not all([customer_name, delivery_address, delivery_lat,
|
| 553 |
+
delivery_lng]):
|
| 554 |
+
return {"success": False, "error": "Missing required
|
| 555 |
+
fields..."}
|
| 556 |
+
|
| 557 |
+
# Generate order ID
|
| 558 |
+
now = datetime.now()
|
| 559 |
+
order_id = f"ORD-{now.strftime('%Y%m%d%H%M%S')}"
|
| 560 |
+
# β
|
| 561 |
+
# e.g., "ORD-20251114015858"
|
| 562 |
+
|
| 563 |
+
# Handle time window
|
| 564 |
+
time_window_end = now + timedelta(hours=6) # 6 hours from now
|
| 565 |
+
time_window_start = now + timedelta(hours=2) # 2 hours from
|
| 566 |
+
now
|
| 567 |
+
|
| 568 |
+
# Insert into database
|
| 569 |
+
query = """
|
| 570 |
+
INSERT INTO orders (
|
| 571 |
+
order_id, customer_name, customer_phone,
|
| 572 |
+
customer_email,
|
| 573 |
+
delivery_address, delivery_lat, delivery_lng,
|
| 574 |
+
time_window_start, time_window_end,
|
| 575 |
+
priority, weight_kg, status, special_instructions
|
| 576 |
+
) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
|
| 577 |
+
%s)
|
| 578 |
+
"""
|
| 579 |
+
|
| 580 |
+
params = (
|
| 581 |
+
order_id, # "ORD-20251114015858"
|
| 582 |
+
customer_name, # "John Doe"
|
| 583 |
+
customer_phone, # None
|
| 584 |
+
customer_email, # None
|
| 585 |
+
delivery_address, # "123 Main St, San Francisco"
|
| 586 |
+
delivery_lat, # 37.7749
|
| 587 |
+
delivery_lng, # -122.4194
|
| 588 |
+
time_window_start, # 2025-11-14 03:58:58
|
| 589 |
+
time_window_end, # 2025-11-14 07:58:58
|
| 590 |
+
priority, # "standard"
|
| 591 |
+
weight_kg, # 5.0
|
| 592 |
+
"pending", # status
|
| 593 |
+
special_instructions # None
|
| 594 |
+
)
|
| 595 |
+
|
| 596 |
+
try:
|
| 597 |
+
execute_write(query, params)
|
| 598 |
+
# β
|
| 599 |
+
# CALLS THIS NEXT - DATABASE WRITE!
|
| 600 |
+
|
| 601 |
+
---
|
| 602 |
+
STEP 17: execute_write() - INSERT INTO DATABASE
|
| 603 |
+
|
| 604 |
+
FILE: database/connection.py
|
| 605 |
+
LINE: 71-97
|
| 606 |
+
|
| 607 |
+
FUNCTION: execute_write(query, params)
|
| 608 |
+
query = "INSERT INTO orders (...) VALUES (%s, %s, ...)"
|
| 609 |
+
params = ("ORD-20251114015858", "John Doe", None, None, "123
|
| 610 |
+
Main St...", ...)
|
| 611 |
+
|
| 612 |
+
Function Code:
|
| 613 |
+
def execute_write(query, params=None):
|
| 614 |
+
"""Execute a write query (INSERT, UPDATE, DELETE)"""
|
| 615 |
+
try:
|
| 616 |
+
# Connect to PostgreSQL
|
| 617 |
+
conn = get_db_connection()
|
| 618 |
+
# β
|
| 619 |
+
# Opens connection to localhost:5432/fleetmind
|
| 620 |
+
|
| 621 |
+
logger.info("Database connection established:
|
| 622 |
+
fleetmind@localhost")
|
| 623 |
+
|
| 624 |
+
cursor = conn.cursor()
|
| 625 |
+
|
| 626 |
+
# Execute INSERT query
|
| 627 |
+
cursor.execute(query, params)
|
| 628 |
+
# β
|
| 629 |
+
# πΎ EXECUTES SQL:
|
| 630 |
+
# INSERT INTO orders (order_id, customer_name, ...)
|
| 631 |
+
# VALUES ('ORD-20251114015858', 'John Doe', ...)
|
| 632 |
+
|
| 633 |
+
conn.commit() # β SAVES TO POSTGRESQL PERMANENTLY!
|
| 634 |
+
|
| 635 |
+
rows_affected = cursor.rowcount # 1
|
| 636 |
+
|
| 637 |
+
cursor.close()
|
| 638 |
+
conn.close()
|
| 639 |
+
|
| 640 |
+
logger.info("Database connection closed")
|
| 641 |
+
|
| 642 |
+
return rows_affected # Returns 1
|
| 643 |
+
|
| 644 |
+
DATABASE STATE:
|
| 645 |
+
-- New row added to orders table:
|
| 646 |
+
ORDER_ID: ORD-20251114015858
|
| 647 |
+
CUSTOMER_NAME: John Doe
|
| 648 |
+
CUSTOMER_PHONE: NULL
|
| 649 |
+
CUSTOMER_EMAIL: NULL
|
| 650 |
+
DELIVERY_ADDRESS: 123 Main St, San Francisco
|
| 651 |
+
DELIVERY_LAT: 37.7749
|
| 652 |
+
DELIVERY_LNG: -122.4194
|
| 653 |
+
STATUS: pending
|
| 654 |
+
PRIORITY: standard
|
| 655 |
+
WEIGHT_KG: 5.0
|
| 656 |
+
CREATED_AT: 2025-11-14 01:58:58
|
| 657 |
+
|
| 658 |
+
---
|
| 659 |
+
STEP 18: Back to handle_create_order() - Success!
|
| 660 |
+
|
| 661 |
+
FILE: chat/tools.py
|
| 662 |
+
LINE: 224-242
|
| 663 |
+
|
| 664 |
+
execute_write(query, params) # Returned 1 (success)
|
| 665 |
+
|
| 666 |
+
logger.info(f"Order created: {order_id}")
|
| 667 |
+
# β
|
| 668 |
+
# LOGS: "Order created: ORD-20251114015858"
|
| 669 |
+
|
| 670 |
+
return {
|
| 671 |
+
"success": True,
|
| 672 |
+
"order_id": "ORD-20251114015858",
|
| 673 |
+
"status": "pending",
|
| 674 |
+
"customer": "John Doe",
|
| 675 |
+
"address": "123 Main St, San Francisco",
|
| 676 |
+
"deadline": "2025-11-14 07:58",
|
| 677 |
+
"priority": "standard",
|
| 678 |
+
"message": "Order ORD-20251114015858 created successfully!"
|
| 679 |
+
}
|
| 680 |
+
|
| 681 |
+
---
|
| 682 |
+
STEP 19: Back to _process_response() - Second Tool Complete
|
| 683 |
+
|
| 684 |
+
FILE: chat/providers/gemini_provider.py
|
| 685 |
+
LINE: 285-310
|
| 686 |
+
|
| 687 |
+
tool_result = {
|
| 688 |
+
"success": True,
|
| 689 |
+
"order_id": "ORD-20251114015858",
|
| 690 |
+
"status": "pending",
|
| 691 |
+
"customer": "John Doe",
|
| 692 |
+
...
|
| 693 |
+
}
|
| 694 |
+
|
| 695 |
+
# Track for transparency
|
| 696 |
+
tool_calls_made.append({
|
| 697 |
+
"tool": "create_order",
|
| 698 |
+
"input": {
|
| 699 |
+
"customer_name": "John Doe",
|
| 700 |
+
"delivery_address": "123 Main St, San Francisco",
|
| 701 |
+
"delivery_lat": 37.7749,
|
| 702 |
+
"delivery_lng": -122.4194
|
| 703 |
+
},
|
| 704 |
+
"result": tool_result
|
| 705 |
+
})
|
| 706 |
+
|
| 707 |
+
conversation.add_tool_result("create_order", function_args,
|
| 708 |
+
tool_result)
|
| 709 |
+
|
| 710 |
+
# Send function result back to Gemini
|
| 711 |
+
current_response = chat.send_message(
|
| 712 |
+
genai.protos.Content(
|
| 713 |
+
parts=[genai.protos.Part(
|
| 714 |
+
function_response=genai.protos.FunctionResponse(
|
| 715 |
+
name="create_order",
|
| 716 |
+
response={"result": tool_result}
|
| 717 |
+
)
|
| 718 |
+
)]
|
| 719 |
+
)
|
| 720 |
+
)
|
| 721 |
+
# β
|
| 722 |
+
# π API CALL TO GEMINI WITH ORDER CREATION RESULT
|
| 723 |
+
|
| 724 |
+
---
|
| 725 |
+
STEP 20: Gemini Generates Final Response
|
| 726 |
+
|
| 727 |
+
π GOOGLE GEMINI API (External)
|
| 728 |
+
|
| 729 |
+
RECEIVES:
|
| 730 |
+
- Function: create_order
|
| 731 |
+
- Result: {
|
| 732 |
+
"success": True,
|
| 733 |
+
"order_id": "ORD-20251114015858",
|
| 734 |
+
"customer": "John Doe",
|
| 735 |
+
"address": "123 Main St, San Francisco"
|
| 736 |
+
}
|
| 737 |
+
|
| 738 |
+
AI ANALYZES:
|
| 739 |
+
"Perfect! Order successfully created in database!
|
| 740 |
+
Let me tell the user with a nice summary."
|
| 741 |
+
|
| 742 |
+
GENERATES TEXT RESPONSE:
|
| 743 |
+
"Awesome! I have created the order for John Doe.
|
| 744 |
+
|
| 745 |
+
**Here's a summary:**
|
| 746 |
+
β’ Order ID: ORD-20251114015858
|
| 747 |
+
β’ Customer: John Doe
|
| 748 |
+
β’ Address: 123 Main St, San Francisco
|
| 749 |
+
β’ Status: Pending
|
| 750 |
+
β’ Priority: Standard
|
| 751 |
+
|
| 752 |
+
The order has been successfully saved to the database!"
|
| 753 |
+
|
| 754 |
+
RETURNS TO CODE:
|
| 755 |
+
response = {
|
| 756 |
+
candidates: [{
|
| 757 |
+
content: {
|
| 758 |
+
parts: [{
|
| 759 |
+
text: "Awesome! I have created the order for John
|
| 760 |
+
Doe.\n\n**Here's a summary:**..."
|
| 761 |
+
}]
|
| 762 |
+
}
|
| 763 |
+
}]
|
| 764 |
+
}
|
| 765 |
+
|
| 766 |
+
---
|
| 767 |
+
STEP 21: _process_response() - Extract Final Text
|
| 768 |
+
|
| 769 |
+
FILE: chat/providers/gemini_provider.py
|
| 770 |
+
LINE: 272-356
|
| 771 |
+
|
| 772 |
+
# Loop detects no more function calls
|
| 773 |
+
logger.info(f"No more function calls after iteration 2")
|
| 774 |
+
|
| 775 |
+
# Extract text from final response
|
| 776 |
+
parts = current_response.candidates[0].content.parts
|
| 777 |
+
logger.info(f"Extracting text from {len(parts)} parts")
|
| 778 |
+
|
| 779 |
+
for idx, part in enumerate(parts):
|
| 780 |
+
if hasattr(part, 'text') and part.text:
|
| 781 |
+
logger.info(f"Part {idx} has text: {part.text[:50]}...")
|
| 782 |
+
final_text += part.text
|
| 783 |
+
|
| 784 |
+
# final_text = "Awesome! I have created the order for John Doe..."
|
| 785 |
+
|
| 786 |
+
logger.info(f"Returning response: {final_text[:100]}")
|
| 787 |
+
|
| 788 |
+
conversation.add_message("assistant", final_text)
|
| 789 |
+
|
| 790 |
+
return final_text, tool_calls_made
|
| 791 |
+
# β β
|
| 792 |
+
# Response List of 2 tool calls [geocode, create_order]
|
| 793 |
+
|
| 794 |
+
RETURNS:
|
| 795 |
+
(
|
| 796 |
+
"Awesome! I have created the order for John Doe.\n\n**Here's a
|
| 797 |
+
summary:**...",
|
| 798 |
+
[
|
| 799 |
+
{"tool": "geocode_address", "input": {...}, "result":
|
| 800 |
+
{...}},
|
| 801 |
+
{"tool": "create_order", "input": {...}, "result": {...}}
|
| 802 |
+
]
|
| 803 |
+
)
|
| 804 |
+
|
| 805 |
+
---
|
| 806 |
+
STEP 22: Back Through All Functions
|
| 807 |
+
|
| 808 |
+
β Returns to: GeminiProvider.process_message() (line 206)
|
| 809 |
+
β Returns to: ChatEngine.process_message() (line 58)
|
| 810 |
+
β Returns to: handle_chat_message() (line 223)
|
| 811 |
+
β Returns to: send_message() (line 443)
|
| 812 |
+
β Returns to: Gradio UI (line 448)
|
| 813 |
+
|
| 814 |
+
---
|
| 815 |
+
STEP 23: Gradio Updates UI
|
| 816 |
+
|
| 817 |
+
FILE: ui/app.py
|
| 818 |
+
LINE: 448-452
|
| 819 |
+
|
| 820 |
+
send_btn.click(
|
| 821 |
+
fn=send_message,
|
| 822 |
+
outputs=[chatbot, tool_display, conversation_state, msg_input]
|
| 823 |
+
# β β
|
| 824 |
+
# Updates Shows tool calls
|
| 825 |
+
)
|
| 826 |
+
|
| 827 |
+
CHATBOT DISPLAYS:
|
| 828 |
+
User: "Create an order for John Doe at 123 Main St, San Francisco"
|
requirements.txt
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Core Framework
|
| 2 |
+
gradio>=5.9.0
|
| 3 |
+
fastmcp>=0.3.0
|
| 4 |
+
|
| 5 |
+
# AI/ML
|
| 6 |
+
anthropic>=0.40.0
|
| 7 |
+
google-generativeai>=0.3.0
|
| 8 |
+
|
| 9 |
+
# Data & Database
|
| 10 |
+
pandas>=2.2.0
|
| 11 |
+
faker>=23.0.0
|
| 12 |
+
psycopg2-binary>=2.9.9
|
| 13 |
+
|
| 14 |
+
# API Clients
|
| 15 |
+
requests>=2.31.0
|
| 16 |
+
httpx>=0.27.1
|
| 17 |
+
|
| 18 |
+
# Utilities
|
| 19 |
+
python-dotenv>=1.0.0
|
| 20 |
+
pydantic>=2.5.3
|
| 21 |
+
|
| 22 |
+
# Testing
|
| 23 |
+
pytest>=8.0.0
|
| 24 |
+
pytest-asyncio>=0.23.0
|
| 25 |
+
|
| 26 |
+
# Type Checking
|
| 27 |
+
mypy>=1.8.0
|
scripts/init_db.py
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Database initialization script for FleetMind
|
| 3 |
+
Run this script to create the PostgreSQL database and tables
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import sys
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
|
| 9 |
+
# Add parent directory to path to import database module
|
| 10 |
+
sys.path.insert(0, str(Path(__file__).parent.parent))
|
| 11 |
+
|
| 12 |
+
from database.connection import init_database, get_db_connection, test_connection
|
| 13 |
+
from database.schema import get_full_schema
|
| 14 |
+
import logging
|
| 15 |
+
|
| 16 |
+
logging.basicConfig(
|
| 17 |
+
level=logging.INFO,
|
| 18 |
+
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
| 19 |
+
)
|
| 20 |
+
logger = logging.getLogger(__name__)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def main():
|
| 24 |
+
"""Initialize the FleetMind PostgreSQL database"""
|
| 25 |
+
try:
|
| 26 |
+
logger.info("=" * 60)
|
| 27 |
+
logger.info("FleetMind Database Initialization")
|
| 28 |
+
logger.info("=" * 60)
|
| 29 |
+
|
| 30 |
+
# Test connection first
|
| 31 |
+
logger.info("\nStep 1: Testing PostgreSQL connection...")
|
| 32 |
+
if not test_connection():
|
| 33 |
+
logger.error("Failed to connect to PostgreSQL database")
|
| 34 |
+
logger.error("Please check your .env file and ensure PostgreSQL is running")
|
| 35 |
+
sys.exit(1)
|
| 36 |
+
|
| 37 |
+
logger.info("β PostgreSQL connection successful")
|
| 38 |
+
|
| 39 |
+
# Initialize database with full schema
|
| 40 |
+
logger.info("\nStep 2: Creating database schema...")
|
| 41 |
+
init_database()
|
| 42 |
+
|
| 43 |
+
# Verify tables were created
|
| 44 |
+
logger.info("\nStep 3: Verifying table creation...")
|
| 45 |
+
conn = get_db_connection()
|
| 46 |
+
cursor = conn.cursor()
|
| 47 |
+
|
| 48 |
+
# Get list of all tables
|
| 49 |
+
cursor.execute("""
|
| 50 |
+
SELECT table_name
|
| 51 |
+
FROM information_schema.tables
|
| 52 |
+
WHERE table_schema = 'public'
|
| 53 |
+
ORDER BY table_name
|
| 54 |
+
""")
|
| 55 |
+
tables = cursor.fetchall()
|
| 56 |
+
|
| 57 |
+
if tables:
|
| 58 |
+
logger.info(f"β Successfully created {len(tables)} tables:")
|
| 59 |
+
for table in tables:
|
| 60 |
+
logger.info(f" - {table['table_name']}")
|
| 61 |
+
|
| 62 |
+
# Get column count for each table
|
| 63 |
+
cursor.execute(f"""
|
| 64 |
+
SELECT COUNT(*) as column_count
|
| 65 |
+
FROM information_schema.columns
|
| 66 |
+
WHERE table_name = %s
|
| 67 |
+
""", (table['table_name'],))
|
| 68 |
+
col_count = cursor.fetchone()
|
| 69 |
+
logger.info(f" ({col_count['column_count']} columns)")
|
| 70 |
+
else:
|
| 71 |
+
logger.warning("No tables were created")
|
| 72 |
+
|
| 73 |
+
# Specifically check orders table
|
| 74 |
+
logger.info("\nStep 4: Verifying orders table...")
|
| 75 |
+
cursor.execute("""
|
| 76 |
+
SELECT column_name, data_type, is_nullable
|
| 77 |
+
FROM information_schema.columns
|
| 78 |
+
WHERE table_name = 'orders'
|
| 79 |
+
ORDER BY ordinal_position
|
| 80 |
+
""")
|
| 81 |
+
columns = cursor.fetchall()
|
| 82 |
+
|
| 83 |
+
if columns:
|
| 84 |
+
logger.info(f"β Orders table created with {len(columns)} columns:")
|
| 85 |
+
for col in columns[:10]: # Show first 10 columns
|
| 86 |
+
nullable = "NULL" if col['is_nullable'] == 'YES' else "NOT NULL"
|
| 87 |
+
logger.info(f" - {col['column_name']}: {col['data_type']} ({nullable})")
|
| 88 |
+
|
| 89 |
+
if len(columns) > 10:
|
| 90 |
+
logger.info(f" ... and {len(columns) - 10} more columns")
|
| 91 |
+
else:
|
| 92 |
+
logger.error("β Orders table was not created")
|
| 93 |
+
|
| 94 |
+
conn.close()
|
| 95 |
+
|
| 96 |
+
logger.info("\n" + "=" * 60)
|
| 97 |
+
logger.info("Database initialization completed successfully!")
|
| 98 |
+
logger.info("=" * 60)
|
| 99 |
+
logger.info("\nYou can now:")
|
| 100 |
+
logger.info(" 1. Test the database: python scripts/test_db.py")
|
| 101 |
+
logger.info(" 2. Start the application: python ui/app.py")
|
| 102 |
+
|
| 103 |
+
except Exception as e:
|
| 104 |
+
logger.error(f"\nβ Database initialization failed: {e}")
|
| 105 |
+
logger.error("\nTroubleshooting:")
|
| 106 |
+
logger.error(" 1. Ensure PostgreSQL is installed and running")
|
| 107 |
+
logger.error(" 2. Create a database named 'fleetmind' in PostgreSQL")
|
| 108 |
+
logger.error(" Command: createdb fleetmind")
|
| 109 |
+
logger.error(" 3. Update .env file with correct database credentials")
|
| 110 |
+
logger.error(" 4. Verify PostgreSQL user has necessary permissions")
|
| 111 |
+
sys.exit(1)
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
if __name__ == "__main__":
|
| 115 |
+
main()
|
scripts/test_db.py
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Test script to verify PostgreSQL database operations
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import sys
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from datetime import datetime, timedelta
|
| 8 |
+
|
| 9 |
+
# Add parent directory to path
|
| 10 |
+
sys.path.insert(0, str(Path(__file__).parent.parent))
|
| 11 |
+
|
| 12 |
+
from database.connection import execute_query, execute_write
|
| 13 |
+
import logging
|
| 14 |
+
|
| 15 |
+
logging.basicConfig(level=logging.INFO)
|
| 16 |
+
logger = logging.getLogger(__name__)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def test_insert_order():
|
| 20 |
+
"""Test inserting a new order"""
|
| 21 |
+
logger.info("Testing order insertion...")
|
| 22 |
+
|
| 23 |
+
now = datetime.now()
|
| 24 |
+
time_window_start = now + timedelta(hours=2)
|
| 25 |
+
time_window_end = now + timedelta(hours=6)
|
| 26 |
+
|
| 27 |
+
query = """
|
| 28 |
+
INSERT INTO orders (
|
| 29 |
+
order_id, customer_name, customer_phone, customer_email,
|
| 30 |
+
delivery_address, delivery_lat, delivery_lng,
|
| 31 |
+
time_window_start, time_window_end,
|
| 32 |
+
priority, weight_kg, status
|
| 33 |
+
) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
params = (
|
| 37 |
+
"ORD-TEST-001",
|
| 38 |
+
"John Doe",
|
| 39 |
+
"+1-555-0123",
|
| 40 |
+
"john.doe@example.com",
|
| 41 |
+
"123 Main Street, San Francisco, CA 94103",
|
| 42 |
+
37.7749,
|
| 43 |
+
-122.4194,
|
| 44 |
+
time_window_start,
|
| 45 |
+
time_window_end,
|
| 46 |
+
"standard",
|
| 47 |
+
5.5,
|
| 48 |
+
"pending"
|
| 49 |
+
)
|
| 50 |
+
|
| 51 |
+
try:
|
| 52 |
+
result = execute_write(query, params)
|
| 53 |
+
logger.info(f"β Order inserted successfully (rows affected: {result})")
|
| 54 |
+
return True
|
| 55 |
+
except Exception as e:
|
| 56 |
+
logger.error(f"β Failed to insert order: {e}")
|
| 57 |
+
return False
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def test_query_orders():
|
| 61 |
+
"""Test querying orders"""
|
| 62 |
+
logger.info("Testing order query...")
|
| 63 |
+
|
| 64 |
+
query = "SELECT * FROM orders WHERE status = %s"
|
| 65 |
+
params = ("pending",)
|
| 66 |
+
|
| 67 |
+
try:
|
| 68 |
+
results = execute_query(query, params)
|
| 69 |
+
logger.info(f"β Query successful: Found {len(results)} pending orders")
|
| 70 |
+
|
| 71 |
+
for row in results:
|
| 72 |
+
logger.info(f" Order ID: {row['order_id']}")
|
| 73 |
+
logger.info(f" Customer: {row['customer_name']}")
|
| 74 |
+
logger.info(f" Address: {row['delivery_address']}")
|
| 75 |
+
logger.info(f" Priority: {row['priority']}")
|
| 76 |
+
logger.info(f" Status: {row['status']}")
|
| 77 |
+
logger.info(" ---")
|
| 78 |
+
|
| 79 |
+
return True
|
| 80 |
+
except Exception as e:
|
| 81 |
+
logger.error(f"β Failed to query orders: {e}")
|
| 82 |
+
return False
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def test_update_order():
|
| 86 |
+
"""Test updating an order"""
|
| 87 |
+
logger.info("Testing order update...")
|
| 88 |
+
|
| 89 |
+
query = "UPDATE orders SET status = %s, assigned_driver_id = %s WHERE order_id = %s"
|
| 90 |
+
params = ("assigned", "DRV-001", "ORD-TEST-001")
|
| 91 |
+
|
| 92 |
+
try:
|
| 93 |
+
result = execute_write(query, params)
|
| 94 |
+
logger.info(f"β Order updated successfully (rows affected: {result})")
|
| 95 |
+
|
| 96 |
+
# Verify update
|
| 97 |
+
verify_query = "SELECT status, assigned_driver_id FROM orders WHERE order_id = %s"
|
| 98 |
+
verify_result = execute_query(verify_query, ("ORD-TEST-001",))
|
| 99 |
+
|
| 100 |
+
if verify_result:
|
| 101 |
+
row = verify_result[0]
|
| 102 |
+
logger.info(f" New status: {row['status']}")
|
| 103 |
+
logger.info(f" Assigned driver: {row['assigned_driver_id']}")
|
| 104 |
+
|
| 105 |
+
return True
|
| 106 |
+
except Exception as e:
|
| 107 |
+
logger.error(f"β Failed to update order: {e}")
|
| 108 |
+
return False
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def test_delete_order():
|
| 112 |
+
"""Test deleting the test order"""
|
| 113 |
+
logger.info("Testing order deletion (cleanup)...")
|
| 114 |
+
|
| 115 |
+
query = "DELETE FROM orders WHERE order_id = %s"
|
| 116 |
+
params = ("ORD-TEST-001",)
|
| 117 |
+
|
| 118 |
+
try:
|
| 119 |
+
result = execute_write(query, params)
|
| 120 |
+
logger.info(f"β Order deleted successfully (rows affected: {result})")
|
| 121 |
+
return True
|
| 122 |
+
except Exception as e:
|
| 123 |
+
logger.error(f"β Failed to delete order: {e}")
|
| 124 |
+
return False
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def main():
|
| 128 |
+
"""Run all database tests"""
|
| 129 |
+
logger.info("=" * 50)
|
| 130 |
+
logger.info("Starting FleetMind PostgreSQL Database Tests")
|
| 131 |
+
logger.info("=" * 50)
|
| 132 |
+
|
| 133 |
+
tests = [
|
| 134 |
+
("Insert Order", test_insert_order),
|
| 135 |
+
("Query Orders", test_query_orders),
|
| 136 |
+
("Update Order", test_update_order),
|
| 137 |
+
("Delete Order", test_delete_order),
|
| 138 |
+
]
|
| 139 |
+
|
| 140 |
+
results = []
|
| 141 |
+
for test_name, test_func in tests:
|
| 142 |
+
logger.info(f"\n--- {test_name} ---")
|
| 143 |
+
success = test_func()
|
| 144 |
+
results.append((test_name, success))
|
| 145 |
+
|
| 146 |
+
# Summary
|
| 147 |
+
logger.info("\n" + "=" * 50)
|
| 148 |
+
logger.info("Test Summary")
|
| 149 |
+
logger.info("=" * 50)
|
| 150 |
+
|
| 151 |
+
passed = sum(1 for _, success in results if success)
|
| 152 |
+
total = len(results)
|
| 153 |
+
|
| 154 |
+
for test_name, success in results:
|
| 155 |
+
status = "β PASSED" if success else "β FAILED"
|
| 156 |
+
logger.info(f"{test_name}: {status}")
|
| 157 |
+
|
| 158 |
+
logger.info(f"\nTotal: {passed}/{total} tests passed")
|
| 159 |
+
|
| 160 |
+
if passed == total:
|
| 161 |
+
logger.info("\nπ All tests passed! Your PostgreSQL database is working correctly!")
|
| 162 |
+
return 0
|
| 163 |
+
else:
|
| 164 |
+
logger.error("\nβ Some tests failed. Please check the errors above.")
|
| 165 |
+
return 1
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
if __name__ == "__main__":
|
| 169 |
+
sys.exit(main())
|
ui/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
UI package for FleetMind
|
| 3 |
+
Contains Gradio interface components
|
| 4 |
+
"""
|
ui/app.py
ADDED
|
@@ -0,0 +1,542 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
FleetMind MCP - Gradio Web Interface
|
| 3 |
+
Simple dashboard to interact with the MCP server and database
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import sys
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
|
| 9 |
+
# Add parent directory to path
|
| 10 |
+
sys.path.insert(0, str(Path(__file__).parent.parent))
|
| 11 |
+
|
| 12 |
+
import gradio as gr
|
| 13 |
+
from database.connection import execute_query, execute_write, test_connection
|
| 14 |
+
from datetime import datetime, timedelta
|
| 15 |
+
import json
|
| 16 |
+
|
| 17 |
+
# Import chat functionality
|
| 18 |
+
from chat.chat_engine import ChatEngine
|
| 19 |
+
from chat.conversation import ConversationManager
|
| 20 |
+
from chat.geocoding import GeocodingService
|
| 21 |
+
|
| 22 |
+
# ============================================
|
| 23 |
+
# DATABASE FUNCTIONS
|
| 24 |
+
# ============================================
|
| 25 |
+
|
| 26 |
+
def get_database_status():
|
| 27 |
+
"""Check if database is connected"""
|
| 28 |
+
try:
|
| 29 |
+
if test_connection():
|
| 30 |
+
return "β
Connected", "success"
|
| 31 |
+
else:
|
| 32 |
+
return "β Disconnected", "error"
|
| 33 |
+
except Exception as e:
|
| 34 |
+
return f"β Error: {str(e)}", "error"
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def get_orders_summary():
|
| 38 |
+
"""Get summary of orders by status"""
|
| 39 |
+
try:
|
| 40 |
+
query = """
|
| 41 |
+
SELECT
|
| 42 |
+
status,
|
| 43 |
+
COUNT(*) as count
|
| 44 |
+
FROM orders
|
| 45 |
+
GROUP BY status
|
| 46 |
+
ORDER BY count DESC
|
| 47 |
+
"""
|
| 48 |
+
results = execute_query(query)
|
| 49 |
+
|
| 50 |
+
if not results:
|
| 51 |
+
return "No orders in database"
|
| 52 |
+
|
| 53 |
+
summary = "**Orders Summary:**\n\n"
|
| 54 |
+
for row in results:
|
| 55 |
+
summary += f"- {row['status'].upper()}: {row['count']}\n"
|
| 56 |
+
|
| 57 |
+
return summary
|
| 58 |
+
except Exception as e:
|
| 59 |
+
return f"Error: {str(e)}"
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def get_all_orders():
|
| 63 |
+
"""Get all orders from database"""
|
| 64 |
+
try:
|
| 65 |
+
query = """
|
| 66 |
+
SELECT
|
| 67 |
+
order_id,
|
| 68 |
+
customer_name,
|
| 69 |
+
delivery_address,
|
| 70 |
+
status,
|
| 71 |
+
priority,
|
| 72 |
+
created_at
|
| 73 |
+
FROM orders
|
| 74 |
+
ORDER BY created_at DESC
|
| 75 |
+
LIMIT 50
|
| 76 |
+
"""
|
| 77 |
+
results = execute_query(query)
|
| 78 |
+
|
| 79 |
+
if not results:
|
| 80 |
+
return [["No orders found", "", "", "", "", ""]]
|
| 81 |
+
|
| 82 |
+
# Convert to list of lists for Gradio dataframe
|
| 83 |
+
data = []
|
| 84 |
+
for row in results:
|
| 85 |
+
data.append([
|
| 86 |
+
row['order_id'],
|
| 87 |
+
row['customer_name'],
|
| 88 |
+
row['delivery_address'][:50] + "..." if len(row['delivery_address']) > 50 else row['delivery_address'],
|
| 89 |
+
row['status'],
|
| 90 |
+
row['priority'],
|
| 91 |
+
str(row['created_at'])
|
| 92 |
+
])
|
| 93 |
+
|
| 94 |
+
return data
|
| 95 |
+
except Exception as e:
|
| 96 |
+
return [[f"Error: {str(e)}", "", "", "", "", ""]]
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def create_sample_order():
|
| 100 |
+
"""Create a sample order for testing"""
|
| 101 |
+
try:
|
| 102 |
+
now = datetime.now()
|
| 103 |
+
order_id = f"ORD-{now.strftime('%Y%m%d%H%M%S')}"
|
| 104 |
+
|
| 105 |
+
query = """
|
| 106 |
+
INSERT INTO orders (
|
| 107 |
+
order_id, customer_name, customer_phone, customer_email,
|
| 108 |
+
delivery_address, delivery_lat, delivery_lng,
|
| 109 |
+
time_window_start, time_window_end,
|
| 110 |
+
priority, weight_kg, status
|
| 111 |
+
) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
|
| 112 |
+
"""
|
| 113 |
+
|
| 114 |
+
params = (
|
| 115 |
+
order_id,
|
| 116 |
+
"Sample Customer",
|
| 117 |
+
"+1-555-0100",
|
| 118 |
+
"sample@example.com",
|
| 119 |
+
"456 Sample Street, San Francisco, CA 94103",
|
| 120 |
+
37.7749,
|
| 121 |
+
-122.4194,
|
| 122 |
+
now + timedelta(hours=2),
|
| 123 |
+
now + timedelta(hours=6),
|
| 124 |
+
"standard",
|
| 125 |
+
10.5,
|
| 126 |
+
"pending"
|
| 127 |
+
)
|
| 128 |
+
|
| 129 |
+
execute_write(query, params)
|
| 130 |
+
return f"β
Order {order_id} created successfully!", get_all_orders()
|
| 131 |
+
except Exception as e:
|
| 132 |
+
return f"β Error: {str(e)}", get_all_orders()
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def search_orders(search_term):
|
| 136 |
+
"""Search orders by customer name or order ID"""
|
| 137 |
+
try:
|
| 138 |
+
if not search_term:
|
| 139 |
+
return get_all_orders()
|
| 140 |
+
|
| 141 |
+
query = """
|
| 142 |
+
SELECT
|
| 143 |
+
order_id,
|
| 144 |
+
customer_name,
|
| 145 |
+
delivery_address,
|
| 146 |
+
status,
|
| 147 |
+
priority,
|
| 148 |
+
created_at
|
| 149 |
+
FROM orders
|
| 150 |
+
WHERE
|
| 151 |
+
order_id ILIKE %s OR
|
| 152 |
+
customer_name ILIKE %s
|
| 153 |
+
ORDER BY created_at DESC
|
| 154 |
+
LIMIT 50
|
| 155 |
+
"""
|
| 156 |
+
|
| 157 |
+
search_pattern = f"%{search_term}%"
|
| 158 |
+
results = execute_query(query, (search_pattern, search_pattern))
|
| 159 |
+
|
| 160 |
+
if not results:
|
| 161 |
+
return [["No matching orders found", "", "", "", "", ""]]
|
| 162 |
+
|
| 163 |
+
data = []
|
| 164 |
+
for row in results:
|
| 165 |
+
data.append([
|
| 166 |
+
row['order_id'],
|
| 167 |
+
row['customer_name'],
|
| 168 |
+
row['delivery_address'][:50] + "..." if len(row['delivery_address']) > 50 else row['delivery_address'],
|
| 169 |
+
row['status'],
|
| 170 |
+
row['priority'],
|
| 171 |
+
str(row['created_at'])
|
| 172 |
+
])
|
| 173 |
+
|
| 174 |
+
return data
|
| 175 |
+
except Exception as e:
|
| 176 |
+
return [[f"Error: {str(e)}", "", "", "", "", ""]]
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
# ============================================
|
| 180 |
+
# CHAT FUNCTIONS
|
| 181 |
+
# ============================================
|
| 182 |
+
|
| 183 |
+
# Initialize chat engine and geocoding service
|
| 184 |
+
chat_engine = ChatEngine()
|
| 185 |
+
geocoding_service = GeocodingService()
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def get_api_status():
|
| 189 |
+
"""Get API status for chat"""
|
| 190 |
+
# Get full status for all providers
|
| 191 |
+
full_status = chat_engine.get_full_status()
|
| 192 |
+
selected = full_status["selected"]
|
| 193 |
+
claude_status = full_status["claude"]["status"]
|
| 194 |
+
gemini_status = full_status["gemini"]["status"]
|
| 195 |
+
|
| 196 |
+
geocoding_status = geocoding_service.get_status()
|
| 197 |
+
|
| 198 |
+
# Mark selected provider
|
| 199 |
+
claude_marker = "π― **ACTIVE** - " if selected == "anthropic" else ""
|
| 200 |
+
gemini_marker = "π― **ACTIVE** - " if selected == "gemini" else ""
|
| 201 |
+
|
| 202 |
+
return f"""### API Status
|
| 203 |
+
|
| 204 |
+
**AI Provider:**
|
| 205 |
+
|
| 206 |
+
**Claude (Anthropic):**
|
| 207 |
+
{claude_marker}{claude_status}
|
| 208 |
+
|
| 209 |
+
**Gemini (Google):**
|
| 210 |
+
{gemini_marker}{gemini_status}
|
| 211 |
+
|
| 212 |
+
*π‘ Switch provider by setting `AI_PROVIDER=anthropic` or `AI_PROVIDER=gemini` in .env*
|
| 213 |
+
|
| 214 |
+
---
|
| 215 |
+
|
| 216 |
+
**Geocoding:**
|
| 217 |
+
|
| 218 |
+
**HERE Maps:**
|
| 219 |
+
{geocoding_status}
|
| 220 |
+
"""
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
def handle_chat_message(message, conversation_state):
|
| 224 |
+
"""
|
| 225 |
+
Handle chat message from user
|
| 226 |
+
|
| 227 |
+
Args:
|
| 228 |
+
message: User's message
|
| 229 |
+
conversation_state: ConversationManager instance
|
| 230 |
+
|
| 231 |
+
Returns:
|
| 232 |
+
Updated chatbot history, tool display, conversation state
|
| 233 |
+
"""
|
| 234 |
+
if not message.strip():
|
| 235 |
+
return conversation_state.get_formatted_history(), conversation_state.get_tool_calls(), conversation_state
|
| 236 |
+
|
| 237 |
+
# Process message through chat engine
|
| 238 |
+
response, tool_calls = chat_engine.process_message(message, conversation_state)
|
| 239 |
+
|
| 240 |
+
# Return updated UI
|
| 241 |
+
return conversation_state.get_formatted_history(), conversation_state.get_tool_calls(), conversation_state
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
def reset_conversation():
|
| 245 |
+
"""Reset conversation to start fresh"""
|
| 246 |
+
new_conversation = ConversationManager()
|
| 247 |
+
|
| 248 |
+
# Add welcome message
|
| 249 |
+
welcome = chat_engine.get_welcome_message()
|
| 250 |
+
new_conversation.add_message("assistant", welcome)
|
| 251 |
+
|
| 252 |
+
return (
|
| 253 |
+
new_conversation.get_formatted_history(),
|
| 254 |
+
[], # Clear tool calls
|
| 255 |
+
new_conversation
|
| 256 |
+
)
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
def get_initial_chat():
|
| 260 |
+
"""Get initial chat state with welcome message"""
|
| 261 |
+
conversation = ConversationManager()
|
| 262 |
+
welcome = chat_engine.get_welcome_message()
|
| 263 |
+
conversation.add_message("assistant", welcome)
|
| 264 |
+
|
| 265 |
+
return conversation.get_formatted_history(), [], conversation
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
# ============================================
|
| 269 |
+
# MCP SERVER INFO
|
| 270 |
+
# ============================================
|
| 271 |
+
|
| 272 |
+
def get_mcp_server_info():
|
| 273 |
+
"""Get MCP server information"""
|
| 274 |
+
mcp_info = {
|
| 275 |
+
"server_name": "dispatch-coordinator-mcp",
|
| 276 |
+
"version": "1.0.0",
|
| 277 |
+
"status": "Ready",
|
| 278 |
+
"tools": [
|
| 279 |
+
"route_optimizer",
|
| 280 |
+
"geocoder",
|
| 281 |
+
"weather_monitor",
|
| 282 |
+
"traffic_checker",
|
| 283 |
+
"distance_matrix",
|
| 284 |
+
"order_manager"
|
| 285 |
+
]
|
| 286 |
+
}
|
| 287 |
+
|
| 288 |
+
return f"""
|
| 289 |
+
### MCP Server Information
|
| 290 |
+
|
| 291 |
+
**Name:** {mcp_info['server_name']}
|
| 292 |
+
**Version:** {mcp_info['version']}
|
| 293 |
+
**Status:** π’ {mcp_info['status']}
|
| 294 |
+
|
| 295 |
+
**Available Tools ({len(mcp_info['tools'])}):**
|
| 296 |
+
{chr(10).join([f"- {tool}" for tool in mcp_info['tools']])}
|
| 297 |
+
"""
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
# ============================================
|
| 301 |
+
# GRADIO INTERFACE
|
| 302 |
+
# ============================================
|
| 303 |
+
|
| 304 |
+
def create_interface():
|
| 305 |
+
"""Create the Gradio interface"""
|
| 306 |
+
|
| 307 |
+
with gr.Blocks(theme=gr.themes.Soft(), title="FleetMind MCP Dashboard") as app:
|
| 308 |
+
|
| 309 |
+
gr.Markdown("# π FleetMind MCP Dashboard")
|
| 310 |
+
gr.Markdown("*Autonomous Dispatch Coordinator powered by MCP and PostgreSQL*")
|
| 311 |
+
|
| 312 |
+
with gr.Tabs():
|
| 313 |
+
|
| 314 |
+
# ==========================================
|
| 315 |
+
# TAB 1: OVERVIEW
|
| 316 |
+
# ==========================================
|
| 317 |
+
with gr.Tab("π Overview"):
|
| 318 |
+
with gr.Row():
|
| 319 |
+
with gr.Column(scale=1):
|
| 320 |
+
gr.Markdown("### System Status")
|
| 321 |
+
db_status = gr.Textbox(
|
| 322 |
+
label="Database Connection",
|
| 323 |
+
value=get_database_status()[0],
|
| 324 |
+
interactive=False
|
| 325 |
+
)
|
| 326 |
+
refresh_status_btn = gr.Button("π Refresh Status", size="sm")
|
| 327 |
+
|
| 328 |
+
gr.Markdown("---")
|
| 329 |
+
orders_summary = gr.Markdown(get_orders_summary())
|
| 330 |
+
|
| 331 |
+
with gr.Column(scale=2):
|
| 332 |
+
mcp_info = gr.Markdown(get_mcp_server_info())
|
| 333 |
+
|
| 334 |
+
# Refresh status button action
|
| 335 |
+
refresh_status_btn.click(
|
| 336 |
+
fn=lambda: get_database_status()[0],
|
| 337 |
+
outputs=db_status
|
| 338 |
+
)
|
| 339 |
+
|
| 340 |
+
# ==========================================
|
| 341 |
+
# TAB 2: ORDERS MANAGEMENT
|
| 342 |
+
# ==========================================
|
| 343 |
+
with gr.Tab("π¦ Orders"):
|
| 344 |
+
gr.Markdown("### Orders Management")
|
| 345 |
+
|
| 346 |
+
with gr.Row():
|
| 347 |
+
search_box = gr.Textbox(
|
| 348 |
+
placeholder="Search by Order ID or Customer Name...",
|
| 349 |
+
label="Search Orders",
|
| 350 |
+
scale=3
|
| 351 |
+
)
|
| 352 |
+
search_btn = gr.Button("π Search", scale=1)
|
| 353 |
+
create_btn = gr.Button("β Create Sample Order", scale=1, variant="primary")
|
| 354 |
+
|
| 355 |
+
create_result = gr.Textbox(label="Result", visible=False)
|
| 356 |
+
|
| 357 |
+
orders_table = gr.Dataframe(
|
| 358 |
+
headers=["Order ID", "Customer", "Delivery Address", "Status", "Priority", "Created At"],
|
| 359 |
+
datatype=["str", "str", "str", "str", "str", "str"],
|
| 360 |
+
label="Orders List",
|
| 361 |
+
value=get_all_orders(),
|
| 362 |
+
interactive=False,
|
| 363 |
+
wrap=True
|
| 364 |
+
)
|
| 365 |
+
|
| 366 |
+
refresh_orders_btn = gr.Button("π Refresh Orders")
|
| 367 |
+
|
| 368 |
+
# Button actions
|
| 369 |
+
create_btn.click(
|
| 370 |
+
fn=create_sample_order,
|
| 371 |
+
outputs=[create_result, orders_table]
|
| 372 |
+
).then(
|
| 373 |
+
fn=lambda: gr.update(visible=True),
|
| 374 |
+
outputs=create_result
|
| 375 |
+
).then(
|
| 376 |
+
fn=lambda: get_orders_summary(),
|
| 377 |
+
outputs=orders_summary
|
| 378 |
+
)
|
| 379 |
+
|
| 380 |
+
search_btn.click(
|
| 381 |
+
fn=search_orders,
|
| 382 |
+
inputs=search_box,
|
| 383 |
+
outputs=orders_table
|
| 384 |
+
)
|
| 385 |
+
|
| 386 |
+
search_box.submit(
|
| 387 |
+
fn=search_orders,
|
| 388 |
+
inputs=search_box,
|
| 389 |
+
outputs=orders_table
|
| 390 |
+
)
|
| 391 |
+
|
| 392 |
+
refresh_orders_btn.click(
|
| 393 |
+
fn=get_all_orders,
|
| 394 |
+
outputs=orders_table
|
| 395 |
+
).then(
|
| 396 |
+
fn=lambda: get_orders_summary(),
|
| 397 |
+
outputs=orders_summary
|
| 398 |
+
)
|
| 399 |
+
|
| 400 |
+
# ==========================================
|
| 401 |
+
# TAB 3: AI CHAT
|
| 402 |
+
# ==========================================
|
| 403 |
+
with gr.Tab("π¬ Chat"):
|
| 404 |
+
provider_name = chat_engine.get_provider_name()
|
| 405 |
+
model_name = chat_engine.get_model_name()
|
| 406 |
+
|
| 407 |
+
gr.Markdown(f"### AI Order Assistant")
|
| 408 |
+
gr.Markdown(f"*Powered by: **{provider_name}** ({model_name})*")
|
| 409 |
+
|
| 410 |
+
# API Status
|
| 411 |
+
api_status = gr.Markdown(get_api_status())
|
| 412 |
+
|
| 413 |
+
# Chat interface
|
| 414 |
+
chatbot = gr.Chatbot(
|
| 415 |
+
label="Order Assistant",
|
| 416 |
+
height=500,
|
| 417 |
+
type="messages",
|
| 418 |
+
show_copy_button=True
|
| 419 |
+
)
|
| 420 |
+
|
| 421 |
+
msg_input = gr.Textbox(
|
| 422 |
+
placeholder="e.g., 'Create an order for John Doe at 123 Main St, deliver by 5 PM'",
|
| 423 |
+
label="Your Message",
|
| 424 |
+
lines=2
|
| 425 |
+
)
|
| 426 |
+
|
| 427 |
+
with gr.Row():
|
| 428 |
+
send_btn = gr.Button("π€ Send", variant="primary", scale=2)
|
| 429 |
+
clear_btn = gr.Button("π Clear Chat", scale=1)
|
| 430 |
+
|
| 431 |
+
# Tool usage display (reasoning transparency)
|
| 432 |
+
with gr.Accordion("π§ Tool Usage (AI Reasoning)", open=False):
|
| 433 |
+
gr.Markdown("See what tools the AI is using behind the scenes:")
|
| 434 |
+
tool_display = gr.JSON(label="Tools Called")
|
| 435 |
+
|
| 436 |
+
# Conversation state
|
| 437 |
+
conversation_state = gr.State(value=None)
|
| 438 |
+
|
| 439 |
+
# Initialize with welcome message
|
| 440 |
+
chatbot.value, tool_display.value, conversation_state.value = get_initial_chat()
|
| 441 |
+
|
| 442 |
+
# Event handlers
|
| 443 |
+
def send_message(message, conv_state):
|
| 444 |
+
"""Handle send button click"""
|
| 445 |
+
chat_history, tools, new_state = handle_chat_message(message, conv_state)
|
| 446 |
+
return chat_history, tools, new_state, "" # Clear input
|
| 447 |
+
|
| 448 |
+
send_btn.click(
|
| 449 |
+
fn=send_message,
|
| 450 |
+
inputs=[msg_input, conversation_state],
|
| 451 |
+
outputs=[chatbot, tool_display, conversation_state, msg_input]
|
| 452 |
+
)
|
| 453 |
+
|
| 454 |
+
msg_input.submit(
|
| 455 |
+
fn=send_message,
|
| 456 |
+
inputs=[msg_input, conversation_state],
|
| 457 |
+
outputs=[chatbot, tool_display, conversation_state, msg_input]
|
| 458 |
+
)
|
| 459 |
+
|
| 460 |
+
clear_btn.click(
|
| 461 |
+
fn=reset_conversation,
|
| 462 |
+
outputs=[chatbot, tool_display, conversation_state]
|
| 463 |
+
)
|
| 464 |
+
|
| 465 |
+
# ==========================================
|
| 466 |
+
# TAB 4: MCP TOOLS (Coming Soon)
|
| 467 |
+
# ==========================================
|
| 468 |
+
with gr.Tab("π§ MCP Tools"):
|
| 469 |
+
gr.Markdown("### MCP Tools")
|
| 470 |
+
gr.Markdown("*MCP tool integration coming soon...*")
|
| 471 |
+
|
| 472 |
+
gr.Markdown("""
|
| 473 |
+
Available tools:
|
| 474 |
+
- **route_optimizer** - Optimize delivery routes
|
| 475 |
+
- **geocoder** - Convert addresses to coordinates
|
| 476 |
+
- **weather_monitor** - Check weather conditions
|
| 477 |
+
- **traffic_checker** - Monitor traffic conditions
|
| 478 |
+
- **distance_matrix** - Calculate distances
|
| 479 |
+
- **order_manager** - Manage orders via MCP
|
| 480 |
+
""")
|
| 481 |
+
|
| 482 |
+
# ==========================================
|
| 483 |
+
# TAB 5: DATABASE INFO
|
| 484 |
+
# ==========================================
|
| 485 |
+
with gr.Tab("πΎ Database"):
|
| 486 |
+
gr.Markdown("### Database Information")
|
| 487 |
+
|
| 488 |
+
db_info = gr.Markdown(f"""
|
| 489 |
+
**Database:** PostgreSQL
|
| 490 |
+
**Name:** fleetmind
|
| 491 |
+
**Host:** localhost
|
| 492 |
+
**Port:** 5432
|
| 493 |
+
|
| 494 |
+
**Tables:**
|
| 495 |
+
- orders (26 columns)
|
| 496 |
+
- drivers (coming soon)
|
| 497 |
+
- assignments (coming soon)
|
| 498 |
+
- exceptions (coming soon)
|
| 499 |
+
""")
|
| 500 |
+
|
| 501 |
+
test_db_btn = gr.Button("π§ͺ Test Connection", variant="primary")
|
| 502 |
+
test_result = gr.Textbox(label="Test Result", interactive=False)
|
| 503 |
+
|
| 504 |
+
test_db_btn.click(
|
| 505 |
+
fn=lambda: "β
Connection successful!" if test_connection() else "β Connection failed",
|
| 506 |
+
outputs=test_result
|
| 507 |
+
)
|
| 508 |
+
|
| 509 |
+
gr.Markdown("---")
|
| 510 |
+
gr.Markdown("*FleetMind MCP v1.0.0 - Built with Gradio, PostgreSQL, and FastMCP*")
|
| 511 |
+
|
| 512 |
+
return app
|
| 513 |
+
|
| 514 |
+
|
| 515 |
+
# ============================================
|
| 516 |
+
# MAIN
|
| 517 |
+
# ============================================
|
| 518 |
+
|
| 519 |
+
if __name__ == "__main__":
|
| 520 |
+
print("=" * 60)
|
| 521 |
+
print("FleetMind MCP - Starting Gradio Server")
|
| 522 |
+
print("=" * 60)
|
| 523 |
+
|
| 524 |
+
# Check database connection
|
| 525 |
+
print("\nChecking database connection...")
|
| 526 |
+
if test_connection():
|
| 527 |
+
print("β
Database connected")
|
| 528 |
+
else:
|
| 529 |
+
print("β Database connection failed")
|
| 530 |
+
print("Please check your .env file and PostgreSQL server")
|
| 531 |
+
|
| 532 |
+
print("\nStarting Gradio interface...")
|
| 533 |
+
print("=" * 60)
|
| 534 |
+
|
| 535 |
+
# Create and launch the interface
|
| 536 |
+
app = create_interface()
|
| 537 |
+
app.launch(
|
| 538 |
+
server_name="0.0.0.0", # Allow external connections for HF Spaces
|
| 539 |
+
server_port=7860,
|
| 540 |
+
share=False,
|
| 541 |
+
show_error=True
|
| 542 |
+
)
|