Maga222006 committed
Commit e6a90e9 · 0 Parent(s)

MultiagentPersonalAssistant
Files changed:

- .DS_Store +0 -0
- .idea/.gitignore +8 -0
- .idea/vcs.xml +4 -0
- Dockerfile +32 -0
- README.md +136 -0
- __pycache__/app.cpython-311.pyc +0 -0
- __pycache__/app.cpython-312.pyc +0 -0
- agent/__pycache__/coder_agent.cpython-312.pyc +0 -0
- agent/__pycache__/deep_research_agent.cpython-312.pyc +0 -0
- agent/__pycache__/file_preprocessing.cpython-312.pyc +0 -0
- agent/__pycache__/models.cpython-312.pyc +0 -0
- agent/__pycache__/multi_agent.cpython-312.pyc +0 -0
- agent/__pycache__/prompts.cpython-312.pyc +0 -0
- agent/__pycache__/states.cpython-312.pyc +0 -0
- agent/__pycache__/tools.cpython-311.pyc +0 -0
- agent/__pycache__/tools.cpython-312.pyc +0 -0
- agent/coder_agent.py +39 -0
- agent/deep_research_agent.py +39 -0
- agent/file_preprocessing.py +92 -0
- agent/models.py +15 -0
- agent/multi_agent.py +164 -0
- agent/prompts.py +48 -0
- agent/states.py +14 -0
- agent/tools.py +179 -0
- app.py +131 -0
- database_files/main.db +0 -0
- database_interaction/__pycache__/config.cpython-311.pyc +0 -0
- database_interaction/__pycache__/config.cpython-312.pyc +0 -0
- database_interaction/__pycache__/models.cpython-311.pyc +0 -0
- database_interaction/__pycache__/models.cpython-312.pyc +0 -0
- database_interaction/__pycache__/user.cpython-311.pyc +0 -0
- database_interaction/__pycache__/user.cpython-312.pyc +0 -0
- database_interaction/config.py +82 -0
- database_interaction/models.py +24 -0
- database_interaction/user.py +91 -0
- requirements.txt +48 -0
- tmp/classifier.ckpt +1 -0
- tmp/embedding_model.ckpt +1 -0
- tmp/hyperparams.yaml +1 -0
- tmp/label_encoder.ckpt +1 -0
.DS_Store
ADDED
Binary file (8.2 kB).
.idea/.gitignore
ADDED
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
.idea/vcs.xml
ADDED
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings" defaultProject="true" />
+</project>
Dockerfile
ADDED
@@ -0,0 +1,32 @@
+FROM python:3.12-slim
+
+# Install build-essential and gcc
+USER root
+RUN apt-get update && \
+    apt-get install -y build-essential gcc && \
+    rm -rf /var/lib/apt/lists/*
+
+# Create a non-root user with UID 1000
+RUN useradd -m -u 1000 user
+
+# Switch to the new user
+USER user
+
+# Set the working directory to the user's home directory
+WORKDIR /home/user/app
+
+# Copy requirements first to leverage Docker cache
+COPY --chown=user requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy the rest of the application
+COPY --chown=user . .
+
+# Expose the port the app runs on
+EXPOSE 7860
+
+# Ensure the PATH includes the user's local bin
+ENV PATH=/home/user/.local/bin:$PATH
+
+# Command to run the application using uvicorn
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
README.md
ADDED
@@ -0,0 +1,136 @@
+---
+license: mit
+title: 🧠 Multi-Agent AI Assistant
+sdk: docker
+emoji: 💻
+colorFrom: indigo
+colorTo: indigo
+pinned: true
+---
+
+# 🧠 Multi-Agent AI Assistant
+
+This project is a **voice- and text-based AI assistant** powered by **FastAPI**, **LangGraph**, and **LangChain**. It supports:
+- 🤖 Multi-agent system
+- 🗣️ Voice input via speech-to-text
+- 🌄 Image processing
+- 📂 File processing
+- 🌐 Web search, GitHub actions, weather, time, and geolocation tools
+- 🧠 Memory-based conversation history per user
+- 📍 Location-aware capabilities (e.g., timezone, weather)
+
+---
+
+## 🚀 Features
+
+- **FastAPI backend** with `/text`, `/image`, `/voice`, and `/file` endpoints
+- **LangGraph agents** with memory, tools, and typed state
+- **User database** with persistent settings and config
+- **Agents**:
+  - Supervisor agent
+  - Deep research agent
+  - Coding agent
+- **Supervisor tools**:
+  - Web search
+  - Current time
+  - Weather
+  - Yahoo Finance news
+- **Deep research tools**:
+  - Web search
+  - Wikipedia
+  - Yahoo Finance news
+- **Coder tools**:
+  - GitHub toolkit
+  - Web search (for docs research)
+
+---
+
+## 🛠️ Setup Instructions
+
+### 1. Environment
+
+This project requires no manual environment configuration — all secrets are passed dynamically with each request and stored in the database.
+
+Just run:
+
+```bash
+docker build -t multi-agent-assistant .
+docker run -p 7860:7860 multi-agent-assistant
+```
+
+The server launches at http://localhost:7860, with all dependencies installed and configured inside the container.
+
+---
+
+## 📦 API Reference
+
+### `/text` – Send Text Prompt
+
+```http
+POST /text
+Form data:
+- state (str): JSON-encoded state dict
+```
+
+### `/voice` – Send Audio Prompt
+
+```http
+POST /voice
+Form data:
+- state (str): JSON-encoded state dict
+- file (binary): Audio file (WAV, MP3, etc.)
+```
+
+### `/image` – Send Image Prompt
+
+```http
+POST /image
+Form data:
+- state (str): JSON-encoded state dict
+- file (binary): Image file (JPEG, PNG, etc.)
+```
+
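For illustration, a `/text` call from Python could look like the sketch below. It is not part of this commit: the host and port come from the Docker setup above, the state keys are described in the Agent State Structure section further down, and every key value shown is a placeholder.

```python
# Minimal /text client sketch (placeholder keys; server from the Setup section).
import json
import requests

state = {
    "message": {"content": "What's the weather like in Berlin right now?"},
    "user_id": "demo-user-1",
    "first_name": "Demo",
    "assistant_name": "Jarvis",
    "groq_api_key": "<your-groq-key>",
    "tavily_api_key": "<your-tavily-key>",
    "openweathermap_api_key": "<your-openweathermap-key>",
}

response = requests.post(
    "http://localhost:7860/text",
    data={"state": json.dumps(state)},  # the endpoint expects a JSON-encoded 'state' form field
    timeout=300,
)
response.raise_for_status()
print(response.json()["messages"][-1])
```

The `/voice`, `/image`, and `/file` endpoints take the same `state` form field plus a multipart `file` upload.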
+---
+
+## 🧩 Agent State Structure
+
+The system operates using a JSON-based shared state object passed between agents.
+It follows a `TypedDict` schema and may contain keys such as:
+
+```
+{
+  message: AnyMessage (message to the agent)
+  user_id: str (unique user id)
+  first_name: str
+  last_name: str
+  assistant_name: str
+  latitude: str
+  longitude: str
+  location: str (user-readable location)
+  openweathermap_api_key: str
+  github_token: str
+  tavily_api_key: str
+  groq_api_key: str
+  clear_history: bool (True/False)
+  messages: list (Filled automatically from the database)
+}
+```
+
+This dictionary acts as a single mutable state shared across all agent steps, allowing for data accumulation, tool responses, and message tracking.
+
+---
+
+## 🧠 Built With
+
+- [LangGraph](https://github.com/langchain-ai/langgraph)
+- [LangChain](https://github.com/langchain-ai/langchain)
+- [FastAPI](https://fastapi.tiangolo.com/)
+- [SpeechBrain](https://github.com/speechbrain/speechbrain) + [SpeechRecognition](https://github.com/Uberi/speech_recognition) (language ID and speech-to-text)
+- [SQLAlchemy + Async](https://docs.sqlalchemy.org/en/20/orm/extensions/asyncio.html)
+
+---
+
+## 📝 License
+
+MIT — Feel free to use, modify, and contribute!
__pycache__/app.cpython-311.pyc
ADDED
Binary file (7.04 kB).

__pycache__/app.cpython-312.pyc
ADDED
Binary file (6.37 kB).

agent/__pycache__/coder_agent.cpython-312.pyc
ADDED
Binary file (2.02 kB).

agent/__pycache__/deep_research_agent.cpython-312.pyc
ADDED
Binary file (2.09 kB).

agent/__pycache__/file_preprocessing.cpython-312.pyc
ADDED
Binary file (5.91 kB).

agent/__pycache__/models.cpython-312.pyc
ADDED
Binary file (525 Bytes).

agent/__pycache__/multi_agent.cpython-312.pyc
ADDED
Binary file (10.1 kB).

agent/__pycache__/prompts.cpython-312.pyc
ADDED
Binary file (3.08 kB).

agent/__pycache__/states.cpython-312.pyc
ADDED
Binary file (1.52 kB).

agent/__pycache__/tools.cpython-311.pyc
ADDED
Binary file (13.5 kB).

agent/__pycache__/tools.cpython-312.pyc
ADDED
Binary file (10.9 kB).
agent/coder_agent.py
ADDED
@@ -0,0 +1,39 @@
+from agent.prompts import coder_instructions, coder_system_message
+from langchain_core.messages import HumanMessage, SystemMessage
+from agent.models import llm_agents, llm_peripheral
+from langgraph.prebuilt import create_react_agent
+from langgraph.constants import START, END
+from agent.states import PlanCodingTask
+from langgraph.graph import StateGraph
+from agent.tools import coder_tools
+
+# ReAct agent equipped with the GitHub and web-search tools.
+agent = create_react_agent(
+    llm_agents,
+    tools=coder_tools,
+    prompt=coder_instructions
+)
+
+def planning_node(state: dict):
+    # Turn the latest user message into a structured coding plan.
+    planner = llm_peripheral.with_structured_output(PlanCodingTask)
+    plan = planner.invoke(state['messages'][-1].content)
+    state.update(plan)
+    return state
+
+def code_agent(state: dict):
+    system_message = SystemMessage(coder_system_message(state))
+    state.update(agent.invoke({
+        'messages': [
+            system_message,
+            HumanMessage(state['task_description']),
+        ]
+    }))
+    return state
+
+graph = StateGraph(dict)
+graph.add_node("planning_node", planning_node)
+graph.add_node("code_agent", code_agent)
+graph.add_edge(START, "planning_node")
+graph.add_edge("planning_node", "code_agent")
+graph.add_edge("code_agent", END)
+
+coder_agent = graph.compile(name="coder_agent")
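As a usage illustration (not part of the commit), the compiled coder graph could be exercised on its own, outside the supervisor. This sketch assumes GROQ_API_KEY, TAVILY_API_KEY, and GITHUB_TOKEN are already set in the environment, and the task text is made up.

```python
# Hypothetical standalone run of the compiled coder graph.
from langchain_core.messages import HumanMessage
from agent.coder_agent import coder_agent

result = coder_agent.invoke({
    "messages": [HumanMessage("Create a small FastAPI hello-world service and push it to a new repo.")]
})
print(result["messages"][-1].content)  # final reply from the ReAct agent
```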
agent/deep_research_agent.py
ADDED
@@ -0,0 +1,39 @@
+from agent.prompts import deep_research_instructions, deep_research_system_message
+from langchain_core.messages import HumanMessage, SystemMessage
+from agent.models import llm_agents, llm_peripheral
+from langgraph.prebuilt import create_react_agent
+from agent.tools import deep_research_tools
+from langgraph.constants import START, END
+from langgraph.graph import StateGraph
+from agent.states import PlanResearch
+
+# ReAct agent equipped with the web-search, Wikipedia, and Yahoo Finance tools.
+agent = create_react_agent(
+    llm_agents,
+    tools=deep_research_tools,
+    prompt=deep_research_instructions
+)
+
+def planning_node(state: dict):
+    # Break the latest user message into a structured list of search queries.
+    planner = llm_peripheral.with_structured_output(PlanResearch)
+    plan = planner.invoke(state['messages'][-1].content)
+    state.update(plan)
+    return state
+
+def research_agent(state: dict):
+    system_message = SystemMessage(deep_research_system_message(state))
+    state.update(agent.invoke({
+        'messages': [
+            system_message,
+            HumanMessage(state['messages'][-1].content),
+        ]
+    }))
+    return state
+
+graph = StateGraph(dict)
+graph.add_node("planning_node", planning_node)
+graph.add_node("research_agent", research_agent)
+graph.add_edge(START, "planning_node")
+graph.add_edge("planning_node", "research_agent")
+graph.add_edge("research_agent", END)
+
+deep_research_agent = graph.compile(name="deep_research_agent")
agent/file_preprocessing.py
ADDED
@@ -0,0 +1,92 @@
+from transformers import BlipProcessor, BlipForConditionalGeneration
+from speechbrain.inference.classifiers import EncoderClassifier
+import speech_recognition as sr
+from pydub import AudioSegment
+from dotenv import load_dotenv
+from PyPDF2 import PdfReader
+from docx import Document
+from PIL import Image
+import torchaudio
+import mimetypes
+import asyncio
+import torch
+import io
+import os
+
+load_dotenv()
+processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
+language_id = EncoderClassifier.from_hparams(source="speechbrain/lang-id-voxlingua107-ecapa", savedir="tmp")
+
+
+async def preprocess_file(file_name: str):
+    mime_type = mimetypes.guess_type(file_name)[0] or ""  # guard against None for unknown extensions
+    if "image" in mime_type:
+        return await asyncio.to_thread(preprocess_image, file_name)
+    elif "video" in mime_type:
+        # Video captioning is not implemented; return a placeholder instead of falling through to None.
+        return "[Video processing is not supported]"
+    elif "audio" in mime_type:
+        return await asyncio.to_thread(preprocess_audio, file_name)
+    else:
+        return await asyncio.to_thread(preprocess_text, file_name, mime_type)
+
+
+def preprocess_audio(file_name: str):
+    if not os.path.exists(file_name):
+        raise FileNotFoundError(f"File not found: {file_name}")
+
+    # Convert to WAV, identify the spoken language, then transcribe.
+    wav_file = os.path.splitext(file_name)[0] + ".wav"
+    audio = AudioSegment.from_file(file_name)
+    audio.export(wav_file, format="wav")
+    signal = language_id.load_audio(wav_file)
+    out = language_id.classify_batch(signal)[0].tolist()[0]
+    lang_mapping = {
+        20: "en",
+        106: "zh",
+        35: "hi",
+        22: "es",
+        3: "ar",
+        28: "fr",
+        77: "ru",
+        75: "pt",
+        9: "bn",
+        45: "ja",
+        18: "de",
+        51: "ko",
+        102: "vi",
+        99: "uk"
+    }
+    scores = [out[index] for index in lang_mapping.keys()]
+    language = list(lang_mapping.values())[scores.index(max(scores))]
+    recognizer = sr.Recognizer()
+    with sr.AudioFile(wav_file) as source:
+        audio_data = recognizer.record(source)
+    try:
+        text = recognizer.recognize_google(audio_data, language=language)
+    except sr.UnknownValueError:
+        text = "[Unintelligible audio]"
+    except sr.RequestError as e:
+        text = f"[API error: {e}]"
+    os.remove(wav_file)
+    return text
+
+def preprocess_image(file_name: str) -> str:
+    raw_image = Image.open(file_name).convert("RGB")
+    text = "An image of"
+    inputs = processor(raw_image, text, return_tensors="pt")
+    with torch.no_grad():
+        out = model.generate(**inputs)
+    return processor.decode(out[0], skip_special_tokens=True)
+
+def preprocess_text(file_name, mime_type: str) -> str:
+    if "pdf" in mime_type:
+        reader = PdfReader(file_name)
+        return "\n".join((p.extract_text() or "") for p in reader.pages)
+    elif "document" in mime_type:
+        doc = Document(file_name)
+        return "\n".join(p.text for p in doc.paragraphs)
+    try:
+        with open(file_name, "r", encoding="utf-8") as file:
+            return file.read()
+    except Exception:
+        return "[Unsupported format]"
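A quick manual check of the preprocessing dispatch, assuming the sample paths exist locally (they are placeholders) and the BLIP and SpeechBrain models above have finished downloading:

```python
import asyncio
from agent.file_preprocessing import preprocess_file

async def main():
    print(await preprocess_file("samples/cat.png"))      # BLIP image caption
    print(await preprocess_file("samples/note.mp3"))     # language ID + Google speech-to-text
    print(await preprocess_file("samples/report.pdf"))   # PyPDF2 text extraction

asyncio.run(main())
```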
agent/models.py
ADDED
@@ -0,0 +1,15 @@
+from langchain.chat_models import init_chat_model
+from dotenv import load_dotenv
+load_dotenv()
+
+# Supervisor model: routes requests and calls tools.
+llm_supervisor = init_chat_model(
+    model="groq:openai/gpt-oss-120b"
+)
+
+# Lightweight model for planning and summarization steps.
+llm_peripheral = init_chat_model(
+    model="groq:gemma2-9b-it"
+)
+
+# Model used inside the worker (coder / deep-research) agents.
+llm_agents = init_chat_model(
+    model="groq:qwen/qwen3-32b"
+)
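Since all three models go through the Groq provider, a GROQ_API_KEY must be present (loaded via the /authorization flow or a local .env) before any of them can be called. A minimal sanity check, not part of the commit, might be:

```python
# Assumes GROQ_API_KEY is already set in the environment.
from agent.models import llm_peripheral

reply = llm_peripheral.invoke("Reply with the single word: ready")
print(reply.content)
```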
agent/multi_agent.py
ADDED
@@ -0,0 +1,164 @@
+from database_interaction.config import create_or_update_config, load_config_to_env, init_config_db
+from database_interaction.user import get_user_by_id, create_or_update_user, init_user_db
+from langchain_community.chat_message_histories import SQLChatMessageHistory
+from langchain_core.messages import SystemMessage, AnyMessage, AIMessage
+from langchain_core.messages.utils import count_tokens_approximately
+from sqlalchemy.ext.asyncio import create_async_engine
+from langgraph_supervisor import create_supervisor
+from langmem.short_term import SummarizationNode
+from typing_extensions import TypedDict
+from dotenv import load_dotenv
+import os
+
+class State(TypedDict):
+    message: AnyMessage
+    user_id: str
+    first_name: str
+    last_name: str
+    assistant_name: str
+    latitude: str
+    longitude: str
+    location: str
+    openweathermap_api_key: str
+    github_token: str
+    tavily_api_key: str
+    groq_api_key: str
+    clear_history: bool
+    messages: list
+
+
+class Assistant:
+    def __init__(self, state: State):
+        self.state = state
+        self.engine = create_async_engine("sqlite+aiosqlite:///./database_files/main.db", echo=False)
+        self.message_history = SQLChatMessageHistory(
+            session_id=state['user_id'],
+            connection=self.engine,
+            async_mode=True
+        )
+
+    async def authorization(self):
+        """Handle user authorization and configuration setup"""
+        try:
+            await init_user_db()
+            await init_config_db()
+            await create_or_update_user(
+                user_id=self.state['user_id'],
+                first_name=self.state.get('first_name'),
+                last_name=self.state.get('last_name'),
+                latitude=float(self.state['latitude']) if self.state.get('latitude') else None,
+                longitude=float(self.state['longitude']) if self.state.get('longitude') else None,
+            )
+
+            config_data = {}
+            config_fields = [
+                'assistant_name', 'openweathermap_api_key', 'github_token',
+                'tavily_api_key', 'groq_api_key'
+            ]
+
+            for field in config_fields:
+                if self.state.get(field):
+                    config_data[field] = self.state[field]
+
+            if config_data:
+                await create_or_update_config(user_id=self.state['user_id'], **config_data)
+            await load_config_to_env(user_id=self.state['user_id'])
+
+            if 'clear_history' in self.state and self.state['clear_history']:
+                await self.message_history.aclear()
+
+        except Exception as e:
+            print(f"Authorization/setup error: {e}")
+
+    def compile_multi_agent_system(self):
+        """Create and return the multi-agent system"""
+        try:
+            from agent.deep_research_agent import deep_research_agent
+            from agent.coder_agent import coder_agent
+            from agent.prompts import supervisor_instructions
+            from agent.models import llm_supervisor, llm_peripheral
+            from agent.tools import supervisor_tools
+
+            summarization_node = SummarizationNode(
+                token_counter=count_tokens_approximately,
+                model=llm_peripheral,
+                max_tokens=4000,
+                max_summary_tokens=1000,
+                output_messages_key="llm_input_messages",
+            )
+
+            agents = [coder_agent, deep_research_agent]
+
+            supervisor = create_supervisor(
+                model=llm_supervisor,
+                tools=supervisor_tools,
+                agents=agents,
+                prompt=supervisor_instructions(supervisor_tools, agents),
+                add_handoff_back_messages=False,
+                add_handoff_messages=False,
+                output_mode="full_history",
+                pre_model_hook=summarization_node
+            )
+
+            return supervisor.compile()
+
+        except Exception as e:
+            print(f"Error creating multi-agent system: {e}")
+            # Return a simple fallback system with a proper async interface
+            from langchain_core.messages import HumanMessage
+            from langgraph.graph import StateGraph
+            from typing import Dict, Any
+
+            def fallback_node(state: Dict[str, Any]):
+                return {"messages": state.get("messages", []) + [
+                    HumanMessage(content=f"System error: {str(e)}. Please check configuration and try again.")]}
+
+            fallback_graph = StateGraph(dict)
+            fallback_graph.add_node("fallback", fallback_node)
+            fallback_graph.set_entry_point("fallback")
+            fallback_graph.set_finish_point("fallback")
+
+            return fallback_graph.compile()
+
+    async def run(self):
+        """Process messages through the multi-agent system"""
+        try:
+            user_info = await get_user_by_id(user_id=self.state['user_id'])
+            if user_info.get('location'):
+                os.environ['LOCATION'] = user_info['location']
+            if user_info.get('latitude'):
+                os.environ['LATITUDE'] = str(user_info['latitude'])
+            if user_info.get('longitude'):
+                os.environ['LONGITUDE'] = str(user_info['longitude'])
+
+            system_msg = SystemMessage(
+                content=f"""
+You are an intelligent assistant named {os.getenv('ASSISTANT_NAME', 'Assistant')}, a helpful personal assistant built using a multi-agent system architecture. Your tools include web search, weather and time lookups, code execution, and GitHub integration. You work inside a Telegram interface and respond concisely, clearly, and informatively.
+
+The user you are assisting is:
+- **Name**: {user_info.get('first_name', 'Unknown') or 'Unknown'} {user_info.get('last_name', '') or ''}
+- **User ID**: {self.state['user_id']}
+- **Location**: {user_info.get('location', 'Unknown') or 'Unknown'}
+- **Coordinates**: ({user_info.get('latitude', 'N/A') or 'N/A'}, {user_info.get('longitude', 'N/A') or 'N/A'})
+
+You may use their location when answering weather or time-related queries. If the location is unknown, you may ask the user to share it.
+
+Stay helpful, respectful, and relevant to the user's query.
+""".strip()
+            )
+
+            await self.message_history.aadd_message(self.state['message'])
+            messages = await self.message_history.aget_messages()
+
+            # Keep a short recent window plus the system prompt and the latest message.
+            self.state['messages'] = messages[-8:-1] + [system_msg, messages[-1]]
+            multi_agent_system = self.compile_multi_agent_system()
+
+            result = await multi_agent_system.ainvoke({"messages": self.state["messages"]},
+                                                      generation_config=dict(response_modalities=["TEXT"]))
+            await self.message_history.aadd_message(result['messages'][-1])
+            return {"messages": result.get("messages", [])}
+
+        except Exception as e:
+            print(f"Multi-agent node error: {e}")
+            return {"messages": [AIMessage(content=f"I encountered an error: {str(e)}. Please try again.")]}
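For reference, the Assistant class can also be driven directly, bypassing FastAPI; this sketch is not part of the commit and all values are placeholders.

```python
import asyncio
from langchain_core.messages import HumanMessage
from agent.multi_agent import Assistant

async def main():
    state = {
        "user_id": "demo-user-1",
        "first_name": "Demo",
        "latitude": "52.52",
        "longitude": "13.405",
        "groq_api_key": "<your-groq-key>",
        "tavily_api_key": "<your-tavily-key>",
        "message": HumanMessage("What time is it where I am?"),
    }
    assistant = Assistant(state=state)
    await assistant.authorization()  # persists the user/config and loads keys into the environment
    result = await assistant.run()
    print(result["messages"][-1].content)

asyncio.run(main())
```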
agent/prompts.py
ADDED
@@ -0,0 +1,48 @@
+coder_instructions = f"""You are a coding expert. Follow these steps every time:\n
+
+1. Use the 'web_search' tool to find how to solve the task or retrieve the package documentation and code examples.\n
+2. Create a new GitHub repository for this task using the 'create_repo' tool.\n
+3. Use the 'commit_file_to_repo' tool to save your solution code into the GitHub repository.\n
+
+"""
+
+deep_research_instructions = f"""You are a deep research expert. Follow these steps every time:\n
+
+1. Iterate over the given search queries.\n
+2. Iterate over the following steps until you have written a factually correct report:\n
+2.1. Write a report based on the search results.\n
+2.2. Critically assess the report.\n
+2.3. Conduct additional searches using the tools to find more material for the research.\n
+"""
+
+def supervisor_instructions(tools: list, agents: list):
+    return f"""You are the supervisor agent. Your job is to satisfy the user's requests using the given tools and agents.\n
+
+You have access to the following tools and agents:\n
+
+* Tools: {[tool.name + ';' for tool in tools]}\n
+
+* Agents: {[agent.name + ';' for agent in agents]}\n
+
+Use these to satisfy the given queries."""
+
+def coder_system_message(state: dict):
+    return f"""Your job is to create a coding project based on the user query.
+
+1. Create a new {"private" if state['private'] else "public"} GitHub repository named '{state['repo_name']}' for this task using the 'create_repo' tool.\n
+2. Use the 'web_search' tool to research your task.\n
+3. Commit files with the code to your repository.\n
+4. Critically review your code for weak points using the 'list_files' and 'read_file' tools.\n
+5. Use 'web_search' for the latest docs and code examples.\n
+6. Adjust the code after carefully reviewing it.\n
+"""
+
+def deep_research_system_message(state: dict):
+    return f"""Your job is to conduct deep research based on the user query.\n
+
+1. Iterate over the following search queries: {state['search_queries']}\n
+2. Write an extensive report based on the web search results.\n
+3. Critically review your report for weak points.\n
+4. Conduct additional research using the available tools.\n
+5. Write an extensive report based on the web search results.\n
+6. Critically review your report and cycle over these steps until it is factually correct and very detailed.\n
+"""
agent/states.py
ADDED
@@ -0,0 +1,14 @@
+from typing import TypedDict, List
+from pydantic import Field
+
+class SearchQuery(TypedDict):
+    query: str = Field(description="A single plain-text search query string.")
+
+class PlanResearch(TypedDict):
+    search_queries: List[SearchQuery] = Field(description="A list of search queries covering all the information the user asked for. Break the user query down into smaller search queries.")
+
+class PlanCodingTask(TypedDict):
+    repo_name: str = Field(description="The name of the GitHub repository for the project.")
+    private: bool = Field(description="Whether or not the repository is private.", default=False)
+    task_description: str = Field(description="A detailed description of the project for the coder to create.")
+
agent/tools.py
ADDED
@@ -0,0 +1,179 @@
+from langchain_community.tools.yahoo_finance_news import YahooFinanceNewsTool
+from langchain_community.utilities import OpenWeatherMapAPIWrapper
+from langchain_community.utilities import WikipediaAPIWrapper
+from langchain_community.tools import WikipediaQueryRun
+from langchain_tavily.tavily_search import TavilySearch
+from timezonefinder import TimezoneFinder
+from langchain_core.tools import tool
+from dotenv import load_dotenv
+from geopy import Nominatim
+from github import Github
+import datetime
+import pytz
+import os
+
+load_dotenv()
+
+wikipedia = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
+yahoo = YahooFinanceNewsTool()
+web_search = TavilySearch(
+    max_results=5,
+    topic="general",
+    include_answer=True
+)
+tf = TimezoneFinder()
+geolocator = Nominatim(user_agent="my_geocoder")
+weekday_mapping = ("Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday")
+
+@tool
+def create_repo(repo_name: str, private: bool = False):
+    """Creates a GitHub repository with the given repo_name."""
+    try:
+        github = Github(os.getenv('GITHUB_TOKEN'))
+        user = github.get_user()
+        user.create_repo(repo_name, private=private)
+        return f"Repository '{repo_name}' created successfully!"
+    except Exception as e:
+        return f"Error creating repository: {str(e)}"
+
+@tool
+def commit_file_to_repo(repo_name: str, file_path: str, file_contents: str):
+    """Adds a new file to the GitHub repository or updates the existing one."""
+    try:
+        github = Github(os.getenv('GITHUB_TOKEN'))
+        user = github.get_user()
+        repo = user.get_repo(repo_name)
+
+        try:
+            # Check if the file already exists; update it in place if so.
+            file = repo.get_contents(file_path)
+            sha = file.sha
+            repo.update_file(file_path, "Updating file", file_contents, sha)
+            return f"File '{file_path}' updated successfully in '{repo_name}'."
+        except Exception:
+            repo.create_file(file_path, "Adding new file", file_contents)
+            return f"File '{file_path}' created successfully in '{repo_name}'."
+    except Exception as e:
+        return f"Error with file operation: {str(e)}"
+
+@tool
+def read_file(repo_name: str, file_path: str):
+    """Reads the content of a file from a GitHub repository."""
+    try:
+        github = Github(os.getenv('GITHUB_TOKEN'))
+        user = github.get_user()
+        repo = user.get_repo(repo_name)
+        file = repo.get_contents(file_path)
+        return file.decoded_content.decode('utf-8')
+    except Exception as e:
+        return f"Error reading file: {str(e)}"
+
+@tool
+def list_repos():
+    """Lists all repositories owned by the authenticated GitHub user."""
+    try:
+        github = Github(os.getenv('GITHUB_TOKEN'))
+        user = github.get_user()
+        repos = user.get_repos()
+        repo_names = [repo.name for repo in repos]
+        return f"Repositories: {', '.join(repo_names)}"
+    except Exception as e:
+        return f"Error listing repositories: {str(e)}"
+
+@tool
+def list_files(repo_name: str):
+    """Lists all files in the GitHub repository."""
+    try:
+        github = Github(os.getenv('GITHUB_TOKEN'))
+        user = github.get_user()
+        repo = user.get_repo(repo_name)
+        files = repo.get_contents("")
+        file_list = [file.name for file in files]
+        return f"Files in '{repo_name}': {', '.join(file_list)}"
+    except Exception as e:
+        return f"Error listing files: {str(e)}"
+
+@tool
+def delete_file(repo_name: str, file_path: str):
+    """Deletes a file from the GitHub repository."""
+    try:
+        github = Github(os.getenv('GITHUB_TOKEN'))
+        user = github.get_user()
+        repo = user.get_repo(repo_name)
+        file = repo.get_contents(file_path)
+        sha = file.sha
+        repo.delete_file(file_path, "Deleting file", sha)
+        return f"File '{file_path}' deleted successfully from '{repo_name}'."
+    except Exception as e:
+        return f"Error deleting file: {str(e)}"
+
+@tool
+def list_branches(repo_name: str):
+    """Lists all branches in a GitHub repository."""
+    try:
+        github = Github(os.getenv('GITHUB_TOKEN'))
+        user = github.get_user()
+        repo = user.get_repo(repo_name)
+        branches = repo.get_branches()
+        branch_names = [branch.name for branch in branches]
+        return f"Branches in '{repo_name}': {', '.join(branch_names)}"
+    except Exception as e:
+        return f"Error listing branches: {str(e)}"
+
+@tool
+def create_branch(repo_name: str, branch_name: str):
+    """Creates a new branch in a GitHub repository."""
+    try:
+        github = Github(os.getenv('GITHUB_TOKEN'))
+        user = github.get_user()
+        repo = user.get_repo(repo_name)
+        main_branch = repo.get_branch("main")
+        repo.create_git_ref(ref=f"refs/heads/{branch_name}", sha=main_branch.commit.sha)
+        return f"Branch '{branch_name}' created successfully in '{repo_name}'."
+    except Exception as e:
+        return f"Error creating branch: {str(e)}"
+
+@tool
+def current_time(location: str = None):
+    """Get the current time for a location or the current position."""
+    try:
+        if location:
+            location_data = geolocator.geocode(location)
+            if location_data:
+                timezone = pytz.timezone(tf.timezone_at(lat=location_data.latitude, lng=location_data.longitude))
+                location_name = location.capitalize()
+            else:
+                return f"Could not find location: {location}"
+        else:
+            # Use the environment location if available
+            lat = os.getenv('LATITUDE')
+            lon = os.getenv('LONGITUDE')
+            if lat and lon:
+                timezone = pytz.timezone(tf.timezone_at(lat=float(lat), lng=float(lon)))
+                location_name = os.getenv('LOCATION', 'Current Location')
+            else:
+                timezone = pytz.UTC
+                location_name = 'UTC'
+
+        current_dt = datetime.datetime.now(timezone)
+        weekday = weekday_mapping[current_dt.weekday()]
+        return f"Location: {location_name}; Current Date and Time: {current_dt.strftime('%Y-%m-%d %H:%M')}, {weekday}."
+    except Exception as e:
+        return f"Error getting current time: {str(e)}"
+
+@tool
+def weather(location: str = None):
+    """Get the current weather for a location or the current position."""
+    try:
+        weather_wrapper = OpenWeatherMapAPIWrapper(
+            openweathermap_api_key=os.getenv('OPENWEATHERMAP_API_KEY')
+        )
+        if not location:
+            location = os.getenv('LOCATION', 'Unknown')
+        return weather_wrapper.run(location=location)
+    except Exception as e:
+        return f"Error getting weather: {str(e)}"
+
+coder_tools = [web_search, create_repo, create_branch, commit_file_to_repo, read_file, list_files, list_repos, list_branches, delete_file]
+supervisor_tools = [yahoo, web_search, current_time, weather]
+deep_research_tools = [web_search, yahoo, wikipedia]
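Because these are ordinary LangChain `@tool` objects, they can also be invoked directly for debugging. The sketch below is not part of the commit; the GitHub and weather calls additionally need GITHUB_TOKEN and OPENWEATHERMAP_API_KEY in the environment, and "Berlin" is just an example location.

```python
from agent.tools import current_time, weather, list_repos

print(current_time.invoke({"location": "Berlin"}))
print(weather.invoke({"location": "Berlin"}))  # requires OPENWEATHERMAP_API_KEY
print(list_repos.invoke({}))                   # requires GITHUB_TOKEN
```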
app.py
ADDED
@@ -0,0 +1,131 @@
+from fastapi import FastAPI, UploadFile, File, HTTPException
+from agent.file_preprocessing import preprocess_file
+from langchain_core.messages import HumanMessage
+from agent.multi_agent import Assistant
+from typing import Any, Dict
+from fastapi import Form
+from pathlib import Path
+import json
+import os
+
+MEDIA_DIR = Path("mediafiles")
+MEDIA_DIR.mkdir(exist_ok=True)
+
+app = FastAPI()
+
+@app.post("/voice")
+async def voice_mode(
+    state: str = Form(...),
+    file: UploadFile = File(...)
+) -> Dict[str, Any]:
+    # Parse the JSON string form field into a dict
+    try:
+        state_data = json.loads(state)
+    except json.JSONDecodeError:
+        raise HTTPException(status_code=400, detail="Invalid JSON in 'state' form field")
+
+    # Save the uploaded file
+    dest = MEDIA_DIR / file.filename
+    data = await file.read()
+    dest.write_bytes(data)
+
+    # Transcribe the audio
+    transcription = await preprocess_file(str(dest))
+    state_data["message"] = HumanMessage(
+        content=transcription
+    )
+    # Call the agents
+    assistant = Assistant(state=state_data)
+    await assistant.authorization()
+    response = await assistant.run()
+    os.remove(str(dest))
+    return response
+
+@app.post("/image")
+async def image_mode(
+    state: str = Form(...),
+    file: UploadFile = File(...)
+) -> Dict[str, Any]:
+    # Parse the JSON string form field into a dict
+    try:
+        state_data = json.loads(state)
+    except json.JSONDecodeError:
+        raise HTTPException(status_code=400, detail="Invalid JSON in 'state' form field")
+
+    # Save the uploaded file
+    dest = MEDIA_DIR / file.filename
+    data = await file.read()
+    dest.write_bytes(data)
+
+    # Caption the image
+    file_contents = await preprocess_file(str(dest))
+    state_data["message"] = HumanMessage(
+        content=f"{state_data.get('message', {}).get('content', 'User uploaded the image.')} \nImage Description: {file_contents}"
+    )
+
+    # Call the agents
+    assistant = Assistant(state=state_data)
+    await assistant.authorization()
+    response = await assistant.run()
+    os.remove(str(dest))
+    return response
+
+@app.post("/file")
+async def file_mode(
+    state: str = Form(...),
+    file: UploadFile = File(...)
+) -> Dict[str, Any]:
+    # Parse the JSON string form field into a dict
+    try:
+        state_data = json.loads(state)
+    except json.JSONDecodeError:
+        raise HTTPException(status_code=400, detail="Invalid JSON in 'state' form field")
+
+    # Save the uploaded file
+    dest = MEDIA_DIR / file.filename
+    data = await file.read()
+    dest.write_bytes(data)
+
+    # Extract text from / describe the file
+    file_contents = await preprocess_file(str(dest))
+    state_data["message"] = HumanMessage(
+        content=f"{state_data.get('message', {}).get('content', 'User uploaded the file.')} \nFile description: {file_contents}"
+    )
+
+    # Call the agents
+    assistant = Assistant(state=state_data)
+    await assistant.authorization()
+    response = await assistant.run()
+    os.remove(str(dest))
+    return response
+
+@app.post("/text")
+async def text_mode(
+    state: str = Form(...),
+) -> Dict[str, Any]:
+    # Parse the JSON string form field into a dict
+    try:
+        state_data = json.loads(state)
+    except json.JSONDecodeError:
+        raise HTTPException(status_code=400, detail="Invalid JSON in 'state' form field")
+    # Reformat the message
+    state_data['message'] = HumanMessage(content=state_data['message']['content'])
+    # Call the agents
+    assistant = Assistant(state=state_data)
+    await assistant.authorization()
+    response = await assistant.run()
+    return response
+
+@app.post("/authorization")
+async def authorization_mode(
+    state: str = Form(...),
+):
+    # Parse the JSON string form field into a dict
+    try:
+        state_data = json.loads(state)
+    except json.JSONDecodeError:
+        raise HTTPException(status_code=400, detail="Invalid JSON in 'state' form field")
+    # Register or update the user and their config
+    assistant = Assistant(state=state_data)
+    await assistant.authorization()
+    return {"message": "Authorization successful"}
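A companion sketch to the `/text` example earlier: uploading an audio file to `/voice` as multipart form data. It is not part of the commit, and the file path and key values are placeholders.

```python
import json
import requests

state = {
    "user_id": "demo-user-1",
    "groq_api_key": "<your-groq-key>",
    "tavily_api_key": "<your-tavily-key>",
}

with open("samples/question.mp3", "rb") as audio:
    response = requests.post(
        "http://localhost:7860/voice",
        data={"state": json.dumps(state)},
        files={"file": ("question.mp3", audio, "audio/mpeg")},
        timeout=600,
    )
print(response.json()["messages"][-1])
```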
database_files/main.db
ADDED
Binary file (32.8 kB).

database_interaction/__pycache__/config.cpython-311.pyc
ADDED
Binary file (6.37 kB).

database_interaction/__pycache__/config.cpython-312.pyc
ADDED
Binary file (5.21 kB).

database_interaction/__pycache__/models.cpython-311.pyc
ADDED
Binary file (1.81 kB).

database_interaction/__pycache__/models.cpython-312.pyc
ADDED
Binary file (1.36 kB).

database_interaction/__pycache__/user.cpython-311.pyc
ADDED
Binary file (5.44 kB).

database_interaction/__pycache__/user.cpython-312.pyc
ADDED
Binary file (4.7 kB).
database_interaction/config.py
ADDED
@@ -0,0 +1,82 @@
+from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
+from database_interaction.models import UserConfig, Base
+from sqlalchemy.orm import sessionmaker
+from dotenv import load_dotenv
+import os
+
+load_dotenv()
+engine = create_async_engine("sqlite+aiosqlite:///./database_files/main.db", echo=False)
+AsyncSessionLocal = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)
+
+
+async def init_config_db():
+    """Initialize config database tables"""
+    async with engine.begin() as conn:
+        await conn.run_sync(Base.metadata.create_all)
+
+
+async def create_or_update_config(user_id: str, **kwargs):
+    """Create or update user configuration"""
+    async with AsyncSessionLocal() as session:
+        async with session.begin():
+            config = await session.get(UserConfig, user_id)
+            if config:
+                for key, value in kwargs.items():
+                    if hasattr(config, key) and value is not None:
+                        setattr(config, key, value)
+            else:
+                # Filter out None values for a new config
+                filtered_kwargs = {k: v for k, v in kwargs.items() if v is not None}
+                config = UserConfig(user_id=user_id, **filtered_kwargs)
+                session.add(config)
+            await session.commit()
+
+
+async def get_config_by_user_id(user_id: str):
+    """Get configuration by user ID"""
+    async with AsyncSessionLocal() as session:
+        config = await session.get(UserConfig, user_id)
+        if config:
+            return {key: getattr(config, key) for key in config.__table__.columns.keys()}
+        return None
+
+
+async def load_config_to_env(user_id: str):
+    """Load user configuration into environment variables"""
+    config = await get_config_by_user_id(user_id)
+    if not config:
+        return
+
+    # Map config keys to environment variable names
+    env_mapping = {
+        'assistant_name': 'ASSISTANT_NAME',
+        'openweathermap_api_key': 'OPENWEATHERMAP_API_KEY',
+        'github_token': 'GITHUB_TOKEN',
+        'tavily_api_key': 'TAVILY_API_KEY',
+        'groq_api_key': 'GROQ_API_KEY',
+    }
+
+    for config_key, env_key in env_mapping.items():
+        value = config.get(config_key)
+        if value is not None:
+            os.environ[env_key] = str(value)
+
+
+async def save_env_to_file():
+    """Save current environment variables to a .env file"""
+    env_vars = [
+        'DATABASE_URL', 'TOKEN', 'ASSISTANT_NAME', 'LOCATION', 'LATITUDE', 'LONGITUDE',
+        'OPENWEATHERMAP_API_KEY', 'GITHUB_TOKEN', 'TAVILY_API_KEY',
+        'GROQ_API_KEY'
+    ]
+
+    # Set default values if not present
+    if not os.getenv('DATABASE_URL'):
+        os.environ['DATABASE_URL'] = 'sqlite+aiosqlite:///./database_files/main.db'
+    if not os.getenv('TOKEN'):
+        # NOTE: hardcoded bot token committed with the repo; this should come from configuration instead.
+        os.environ['TOKEN'] = '7456008559:AAFbid5x8hhM4qe70SW9xyDHaiDCwHjkQH4'
+
+    with open('.env', 'w') as f:
+        for var in env_vars:
+            value = os.getenv(var, '')
+            f.write(f"{var}={value}\n")
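A round-trip through the config helpers, roughly what `Assistant.authorization()` does per request. Not part of the commit; all values are placeholders.

```python
import asyncio
import os
from database_interaction.config import init_config_db, create_or_update_config, load_config_to_env

async def main():
    await init_config_db()
    await create_or_update_config(
        user_id="demo-user-1",
        assistant_name="Jarvis",
        groq_api_key="<your-groq-key>",
    )
    await load_config_to_env(user_id="demo-user-1")
    print(os.environ["ASSISTANT_NAME"])  # -> Jarvis

asyncio.run(main())
```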
database_interaction/models.py
ADDED
@@ -0,0 +1,24 @@
+from sqlalchemy.orm import declarative_base
+from sqlalchemy import Column, String, Float
+
+Base = declarative_base()
+
+class User(Base):
+    __tablename__ = "users"
+
+    user_id = Column(String, primary_key=True, index=True)
+    first_name = Column(String, nullable=True)
+    last_name = Column(String, nullable=True)
+    latitude = Column(Float, nullable=True)
+    longitude = Column(Float, nullable=True)
+    location = Column(String, nullable=True)
+
+class UserConfig(Base):
+    __tablename__ = "user_config"
+
+    user_id = Column(String, primary_key=True, index=True)
+    assistant_name = Column(String, nullable=True)
+    openweathermap_api_key = Column(String, nullable=True)
+    github_token = Column(String, nullable=True)
+    tavily_api_key = Column(String, nullable=True)
+    groq_api_key = Column(String, nullable=True)
database_interaction/user.py
ADDED
@@ -0,0 +1,91 @@
+from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
+from database_interaction.models import User, Base
+from sqlalchemy.orm import sessionmaker
+from geopy.geocoders import Nominatim
+from dotenv import load_dotenv
+import os
+
+load_dotenv()
+# Ensure the SQLite URL is used, overriding any PostgreSQL URL from the environment
+DATABASE_URL = "sqlite+aiosqlite:///./database_files/main.db"
+engine = create_async_engine(DATABASE_URL, echo=False)
+AsyncSessionLocal = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)
+geolocator = Nominatim(user_agent="ai_assistant")
+
+def get_location_name(lat: float, lon: float) -> str:
+    """Get a location name from coordinates"""
+    try:
+        location = geolocator.reverse((lat, lon), language='en')
+        if location and location.raw and 'address' in location.raw:
+            address = location.raw['address']
+            return (
+                address.get('city')
+                or address.get('town')
+                or address.get('village')
+                or address.get('municipality')
+                or address.get('county')
+                or "Unknown"
+            )
+        return "Unknown"
+    except Exception as e:
+        print(f"[Geocoding Error] {e}")
+        return "Unknown"
+
+async def init_user_db():
+    """Initialize user database tables"""
+    async with engine.begin() as conn:
+        await conn.run_sync(Base.metadata.create_all)
+
+async def create_or_update_user(user_id: str, first_name: str = None, last_name: str = None,
+                                latitude: float = None, longitude: float = None):
+    """Create or update user information"""
+    location_name = get_location_name(latitude, longitude) if latitude and longitude else None
+    async with AsyncSessionLocal() as session:
+        async with session.begin():
+            user = await session.get(User, user_id)
+            if user:
+                if first_name is not None:
+                    user.first_name = first_name
+                if last_name is not None:
+                    user.last_name = last_name
+                if latitude is not None:
+                    user.latitude = latitude
+                if longitude is not None:
+                    user.longitude = longitude
+                if location_name is not None:
+                    user.location = location_name
+            else:
+                user = User(
+                    user_id=user_id,
+                    first_name=first_name,
+                    last_name=last_name,
+                    latitude=latitude,
+                    longitude=longitude,
+                    location=location_name
+                )
+                session.add(user)
+            await session.commit()
+
+async def get_user_by_id(user_id: str):
+    """Get a user by ID"""
+    async with AsyncSessionLocal() as session:
+        result = await session.get(User, user_id)
+        if result:
+            return {
+                "user_id": user_id,
+                "first_name": result.first_name,
+                "last_name": result.last_name,
+                "latitude": result.latitude,
+                "longitude": result.longitude,
+                "location": result.location
+            }
+        return {
+            "user_id": user_id,
+            "first_name": None,
+            "last_name": None,
+            "latitude": None,
+            "longitude": None,
+            "location": None
+        }
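And the matching user-side helpers. Not part of the commit; the coordinates are placeholders, and the reverse geocoder fills in the human-readable location.

```python
import asyncio
from database_interaction.user import init_user_db, create_or_update_user, get_user_by_id

async def main():
    await init_user_db()
    await create_or_update_user(
        user_id="demo-user-1",
        first_name="Demo",
        latitude=52.52,
        longitude=13.405,
    )
    print(await get_user_by_id("demo-user-1"))

asyncio.run(main())
```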
requirements.txt
ADDED
@@ -0,0 +1,48 @@
+fastapi>=0.116.1
+uvicorn>=0.35.0
+python-dotenv>=1.1.1
+langchain>=0.3.27
+langgraph>=0.6.3
+openai>=1.99.1
+transformers>=4.53.2
+langgraph-supervisor>=0.0.29
+wikipedia>=1.4.0
+wolframalpha>=5.1.3
+python-multipart
+langmem>=0.0.29
+greenlet>=3.2.3
+deepagents>=0.0.3
+python-docx>=1.2.0
+pydub
+SpeechRecognition
+PyPDF2>=3.0.1
+asyncpg>=0.30.0
+langchain_huggingface>=0.3.0
+sentence-transformers>=5.0.0
+pyowm>=3.3.0
+geopy>=2.4.1
+langchain-community>=0.3.27
+langchain-tavily>=0.2.11
+langgraph-api>=0.2.86
+PyGithub>=2.7.0
+langchain-openai>=0.3.28
+langchain-google-genai
+gtts>=2.5.4
+timezonefinder>=7.0.1
+geocoder>=1.38.1
+aiosqlite>=0.21.0
+requests>=2.32.4
+aiogram>=3.21.0
+aiohttp>=3.12.14
+colorama>=0.4.6
+colorlog>=6.9.0
+pydantic>=2.11.7
+pyttsx3>=2.99
+pytz>=2025.2
+pyyaml>=6.0.2
+regex>=2024.11.6
+tiktoken>=0.9.0
+huggingface-hub>=0.33.4
+fsspec>=2025.5.1
+typing-extensions>=4.14.1
+tqdm>=4.67.1
+# Imported by agent/file_preprocessing.py and agent/models.py but not pinned above:
+speechbrain
+torch
+torchaudio
+Pillow
+langchain-groq
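With these requirements installed outside Docker, the service can also be started locally. A minimal launcher (an assumption, not a file in this commit) simply mirrors the Dockerfile's CMD line:

```python
# run_local.py — hypothetical local launcher, equivalent to the Dockerfile CMD.
import uvicorn

if __name__ == "__main__":
    uvicorn.run("app:app", host="0.0.0.0", port=7860)
```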
tmp/classifier.ckpt
ADDED
@@ -0,0 +1 @@
+/Users/magomedpatahov/.cache/huggingface/hub/models--speechbrain--lang-id-voxlingua107-ecapa/snapshots/0253049ae131d6a4be1c4f0d8b0ff483a0f8c8e9/classifier.ckpt

tmp/embedding_model.ckpt
ADDED
@@ -0,0 +1 @@
+/Users/magomedpatahov/.cache/huggingface/hub/models--speechbrain--lang-id-voxlingua107-ecapa/snapshots/0253049ae131d6a4be1c4f0d8b0ff483a0f8c8e9/embedding_model.ckpt

tmp/hyperparams.yaml
ADDED
@@ -0,0 +1 @@
+/Users/magomedpatahov/.cache/huggingface/hub/models--speechbrain--lang-id-voxlingua107-ecapa/snapshots/0253049ae131d6a4be1c4f0d8b0ff483a0f8c8e9/hyperparams.yaml

tmp/label_encoder.ckpt
ADDED
@@ -0,0 +1 @@
+/Users/magomedpatahov/.cache/huggingface/hub/models--speechbrain--lang-id-voxlingua107-ecapa/snapshots/0253049ae131d6a4be1c4f0d8b0ff483a0f8c8e9/label_encoder.txt