Spaces:
Running
Running
Commit ·
83ce651
1
Parent(s): 549c0e7
session isolation
Browse files- all_code.txt +771 -229
- app/agents/adk_mathminds.py +25 -6
- app/core/orchestrator.py +38 -323
- app/models/gemini.py +0 -230
- app/models/qwen.py +0 -92
all_code.txt
CHANGED
|
@@ -1,4 +1,51 @@
|
|
| 1 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
================================================================================
|
| 3 |
FILE: e:\madhuri\mathminds\debug_env.py
|
| 4 |
================================================================================
|
|
@@ -207,110 +254,16 @@ FILE: e:\madhuri\mathminds\find_embedding_models.py
|
|
| 207 |
================================================================================
|
| 208 |
|
| 209 |
import os
|
| 210 |
-
import asyncio
|
| 211 |
-
from google import genai
|
| 212 |
from dotenv import load_dotenv
|
|
|
|
| 213 |
|
| 214 |
load_dotenv()
|
|
|
|
| 215 |
|
| 216 |
-
|
| 217 |
-
|
| 218 |
-
|
| 219 |
-
|
| 220 |
-
print("Searching for embedding models...")
|
| 221 |
-
try:
|
| 222 |
-
pager = client.models.list()
|
| 223 |
-
found = False
|
| 224 |
-
for model in pager:
|
| 225 |
-
if "embed" in model.name.lower():
|
| 226 |
-
print(f"FOUND: {model.name}")
|
| 227 |
-
found = True
|
| 228 |
-
if not found:
|
| 229 |
-
print("No embedding models found.")
|
| 230 |
-
|
| 231 |
-
except Exception as e:
|
| 232 |
-
print(f"Error: {e}")
|
| 233 |
-
|
| 234 |
-
if __name__ == "__main__":
|
| 235 |
-
asyncio.run(list_embedding_models())
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
================================================================================
|
| 239 |
-
FILE: e:\madhuri\mathminds\gradio_demo.py
|
| 240 |
-
================================================================================
|
| 241 |
-
|
| 242 |
-
|
| 243 |
-
import gradio as gr
|
| 244 |
-
import requests
|
| 245 |
-
from PIL import Image
|
| 246 |
-
import io
|
| 247 |
-
import base64
|
| 248 |
-
|
| 249 |
-
API_URL = "http://localhost:8000/solve"
|
| 250 |
-
|
| 251 |
-
def process_image(image):
|
| 252 |
-
if image is None:
|
| 253 |
-
return None
|
| 254 |
-
|
| 255 |
-
# Convert to base64
|
| 256 |
-
buffered = io.BytesIO()
|
| 257 |
-
image.save(buffered, format="JPEG")
|
| 258 |
-
img_str = base64.b64encode(buffered.getvalue()).decode()
|
| 259 |
-
return img_str
|
| 260 |
-
|
| 261 |
-
def solve_math(problem_text, image_input):
|
| 262 |
-
payload = {
|
| 263 |
-
"text": problem_text if problem_text else "",
|
| 264 |
-
}
|
| 265 |
-
|
| 266 |
-
if image_input:
|
| 267 |
-
img_b64 = process_image(image_input)
|
| 268 |
-
payload["image"] = img_b64
|
| 269 |
-
|
| 270 |
-
try:
|
| 271 |
-
response = requests.post(API_URL, json=payload)
|
| 272 |
-
response.raise_for_status()
|
| 273 |
-
data = response.json()
|
| 274 |
-
|
| 275 |
-
# Format output
|
| 276 |
-
answer = data.get("answer", "No answer provided")
|
| 277 |
-
steps = "\n".join(data.get("steps", []))
|
| 278 |
-
explanation = data.get("explanation", "")
|
| 279 |
-
|
| 280 |
-
output = f"## Answer\n{answer}\n\n"
|
| 281 |
-
if steps:
|
| 282 |
-
output += f"### Steps\n{steps}\n\n"
|
| 283 |
-
if explanation:
|
| 284 |
-
output += f"### Explanation\n{explanation}"
|
| 285 |
-
|
| 286 |
-
return output
|
| 287 |
-
|
| 288 |
-
except Exception as e:
|
| 289 |
-
return f"Error: {str(e)}"
|
| 290 |
-
|
| 291 |
-
# Define Interface
|
| 292 |
-
with gr.Blocks(title="MathMinds AI") as demo:
|
| 293 |
-
gr.Markdown("# 🧮 MathMinds AI Solver")
|
| 294 |
-
gr.Markdown("Submit a math problem (text or image) to get a step-by-step solution.")
|
| 295 |
-
|
| 296 |
-
with gr.Row():
|
| 297 |
-
with gr.Column():
|
| 298 |
-
input_text = gr.Textbox(label="Problem Text", placeholder="Type your math problem here...")
|
| 299 |
-
input_image = gr.Image(type="pil", label="Upload Image (Optional)")
|
| 300 |
-
submit_btn = gr.Button("Solve", variant="primary")
|
| 301 |
-
|
| 302 |
-
with gr.Column():
|
| 303 |
-
output_md = gr.Markdown(label="Solution")
|
| 304 |
-
|
| 305 |
-
submit_btn.click(
|
| 306 |
-
fn=solve_math,
|
| 307 |
-
inputs=[input_text, input_image],
|
| 308 |
-
outputs=output_md
|
| 309 |
-
)
|
| 310 |
-
|
| 311 |
-
if __name__ == "__main__":
|
| 312 |
-
demo.launch(server_name="0.0.0.0", server_port=7860)
|
| 313 |
-
|
| 314 |
|
| 315 |
================================================================================
|
| 316 |
FILE: e:\madhuri\mathminds\gunicorn_conf.py
|
|
@@ -348,6 +301,118 @@ loglevel = os.getenv("LOG_LEVEL", "info")
|
|
| 348 |
proc_name = "mathminds_api"
|
| 349 |
|
| 350 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 351 |
================================================================================
|
| 352 |
FILE: e:\madhuri\mathminds\reproduce_crash.py
|
| 353 |
================================================================================
|
|
@@ -538,6 +603,43 @@ if __name__ == "__main__":
|
|
| 538 |
run()
|
| 539 |
|
| 540 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 541 |
================================================================================
|
| 542 |
FILE: e:\madhuri\mathminds\verify_phase1.py
|
| 543 |
================================================================================
|
|
@@ -928,161 +1030,180 @@ FILE: e:\madhuri\mathminds\app\__init__.py
|
|
| 928 |
|
| 929 |
|
| 930 |
================================================================================
|
| 931 |
-
FILE: e:\madhuri\mathminds\app\agents\
|
| 932 |
================================================================================
|
| 933 |
|
|
|
|
| 934 |
import logging
|
| 935 |
import asyncio
|
| 936 |
-
|
| 937 |
-
|
| 938 |
-
# LangChain Imports
|
| 939 |
-
# LangChain Imports
|
| 940 |
-
try:
|
| 941 |
-
from langchain.agents import initialize_agent, Tool
|
| 942 |
-
# Try importing AgentType, if not found, we use string
|
| 943 |
-
try:
|
| 944 |
-
from langchain.agents import AgentType
|
| 945 |
-
except ImportError:
|
| 946 |
-
AgentType = None
|
| 947 |
-
except ImportError as e:
|
| 948 |
-
# If langchain is completely missing
|
| 949 |
-
initialize_agent = None
|
| 950 |
-
Tool = None
|
| 951 |
-
AgentType = None
|
| 952 |
|
| 953 |
-
|
| 954 |
-
|
| 955 |
-
|
| 956 |
-
|
| 957 |
-
ChatGoogleGenerativeAI = None
|
| 958 |
-
try:
|
| 959 |
-
from langchain.memory import ConversationBufferMemory
|
| 960 |
-
except ImportError:
|
| 961 |
-
# Fallback or error logging
|
| 962 |
-
logger.error("Failed to import 'ConversationBufferMemory'. Ensure 'langchain' is installed correctly.")
|
| 963 |
-
ConversationBufferMemory = None
|
| 964 |
|
| 965 |
-
# App Imports
|
| 966 |
from app.core.settings import settings
|
| 967 |
-
from app.tools.web_scraper import WebScraper
|
| 968 |
from app.tools.symbolic_solver import SymbolicSolver
|
| 969 |
-
|
| 970 |
-
|
| 971 |
-
# Actually, I should verify content of app/tools/web_scraper.py first to be safe, but I'll write the agent to be robust.
|
| 972 |
|
| 973 |
logger = logging.getLogger(__name__)
|
| 974 |
|
| 975 |
-
class
|
| 976 |
"""
|
| 977 |
-
Agent-based architecture using
|
| 978 |
-
|
| 979 |
"""
|
| 980 |
|
| 981 |
-
def __init__(self, model_name: str = "gemini-
|
| 982 |
-
"""
|
| 983 |
-
Initialize the LangChain agent with Gemini and tools.
|
| 984 |
-
"""
|
| 985 |
self.api_key = settings.GOOGLE_API_KEY
|
| 986 |
if not self.api_key:
|
| 987 |
logger.warning("No Google API Key found. Agent will fail.")
|
| 988 |
|
| 989 |
-
|
| 990 |
-
|
| 991 |
-
|
| 992 |
-
|
| 993 |
-
|
| 994 |
-
|
| 995 |
-
#
|
| 996 |
-
|
| 997 |
-
|
| 998 |
-
|
| 999 |
-
|
| 1000 |
-
|
| 1001 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1002 |
|
| 1003 |
-
|
| 1004 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1005 |
|
| 1006 |
-
|
| 1007 |
-
|
| 1008 |
-
|
| 1009 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1010 |
)
|
| 1011 |
-
|
| 1012 |
-
# 4. Initialize Agent
|
| 1013 |
-
# ZERO_SHOT_REACT_DESCRIPTION is good for general purpose tool use
|
| 1014 |
-
agent_type = AgentType.ZERO_SHOT_REACT_DESCRIPTION if AgentType else "zero-shot-react-description"
|
| 1015 |
|
| 1016 |
-
|
| 1017 |
-
|
| 1018 |
-
|
| 1019 |
-
|
| 1020 |
-
|
| 1021 |
-
|
| 1022 |
-
|
| 1023 |
-
|
| 1024 |
-
|
| 1025 |
-
logger.info("MathMindsLangChainAgent initialized successfully.")
|
| 1026 |
-
except Exception as e:
|
| 1027 |
-
logger.error(f"Failed to initialize LangChain agent: {e}")
|
| 1028 |
-
self.agent_executor = None
|
| 1029 |
-
|
| 1030 |
-
def _load_tools(self) -> List[Tool]:
|
| 1031 |
-
"""
|
| 1032 |
-
Load and wrap all available tools for the agent.
|
| 1033 |
-
"""
|
| 1034 |
-
tools = []
|
| 1035 |
-
try:
|
| 1036 |
-
# 1. Web Scraper
|
| 1037 |
-
web_scraper = WebScraper(headless=True)
|
| 1038 |
-
|
| 1039 |
-
# Sync wrapper for WebScraper
|
| 1040 |
-
def sync_scrape(query: str):
|
| 1041 |
-
# We use the standalone sync function from the module
|
| 1042 |
-
return run_playwright_sync(query, headless=True)
|
| 1043 |
-
|
| 1044 |
-
# Async wrapper for WebScraper
|
| 1045 |
-
async def async_scrape(query: str):
|
| 1046 |
-
return await web_scraper.scrape(query)
|
| 1047 |
-
|
| 1048 |
-
tools.append(Tool(
|
| 1049 |
-
name="Web Search",
|
| 1050 |
-
func=sync_scrape,
|
| 1051 |
-
coroutine=async_scrape,
|
| 1052 |
-
description="Useful for finding current events, prices, weather, and general information from the internet. Input should be a search query."
|
| 1053 |
-
))
|
| 1054 |
-
|
| 1055 |
-
# 2. Symbolic Solver
|
| 1056 |
-
symbolic_solver = SymbolicSolver()
|
| 1057 |
-
tools.append(Tool(
|
| 1058 |
-
name="Math Solver",
|
| 1059 |
-
func=symbolic_solver.solve,
|
| 1060 |
-
description="Useful for solving symbolic math problems like equations, derivatives, integrals, and simplification. Input should be a math expression or problem description."
|
| 1061 |
-
))
|
| 1062 |
-
except Exception as e:
|
| 1063 |
-
logger.error(f"Error loading tools: {e}")
|
| 1064 |
|
| 1065 |
-
|
| 1066 |
|
| 1067 |
-
async def solve(self, problem: str, image_data: Optional[str] = None) ->
|
| 1068 |
"""
|
| 1069 |
Main entry point for the agent to solve a problem.
|
| 1070 |
"""
|
| 1071 |
-
|
| 1072 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1073 |
|
| 1074 |
-
|
| 1075 |
-
|
| 1076 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1077 |
|
| 1078 |
-
try:
|
| 1079 |
-
# Run the agent
|
| 1080 |
-
# use ainvoke for async execution
|
| 1081 |
-
result = await self.agent_executor.ainvoke({"input": input_text})
|
| 1082 |
-
return result['output']
|
| 1083 |
except Exception as e:
|
| 1084 |
-
logger.error(f"Agent execution failed: {e}")
|
| 1085 |
-
return f"Error
|
| 1086 |
|
| 1087 |
|
| 1088 |
================================================================================
|
|
@@ -2255,6 +2376,7 @@ from app.tools.vision_analyzer import VisionAnalyzer
|
|
| 2255 |
from app.tools.similarity_search import SimilarProblemFinder
|
| 2256 |
from app.core.math_normalizer import MathQueryNormalizer
|
| 2257 |
from app.core.settings import settings
|
|
|
|
| 2258 |
|
| 2259 |
logger = logging.getLogger(__name__)
|
| 2260 |
|
|
@@ -2290,6 +2412,9 @@ class Orchestrator:
|
|
| 2290 |
self.similarity_finder = SimilarProblemFinder()
|
| 2291 |
self.math_normalizer = MathQueryNormalizer()
|
| 2292 |
|
|
|
|
|
|
|
|
|
|
| 2293 |
except Exception as e:
|
| 2294 |
logger.critical(f"Failed to initialize Orchestrator: {e}")
|
| 2295 |
raise
|
|
@@ -2371,7 +2496,32 @@ class Orchestrator:
|
|
| 2371 |
result_schema["explanation"] = processed.error_message
|
| 2372 |
return self._finalize_result(result_schema, start_time)
|
| 2373 |
|
| 2374 |
-
# 2.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2375 |
image_data = processed.metadata.get("image_data")
|
| 2376 |
p_hash = generate_problem_hash(processed.cleaned_content, image_data)
|
| 2377 |
lock_acquired = False
|
|
@@ -2527,7 +2677,7 @@ class Orchestrator:
|
|
| 2527 |
prompt = f"{user_context_str}\n\nProblem: {processed.cleaned_content}"
|
| 2528 |
|
| 2529 |
gem_res = await self._safe_llm_call(prompt)
|
| 2530 |
-
self._populate_success(result_schema, gem_res, "gemini-
|
| 2531 |
|
| 2532 |
# 5. Save & Index
|
| 2533 |
if result_schema["status"] == "success":
|
|
@@ -3746,7 +3896,7 @@ class VertexGeminiModel(BaseModel):
|
|
| 3746 |
Offers enterprise features, higher quotas, and better monitoring than the standard API.
|
| 3747 |
"""
|
| 3748 |
|
| 3749 |
-
def __init__(self, project_id: str = None, location: str = "us-central1", model_name: str = "gemini-
|
| 3750 |
"""
|
| 3751 |
Initialize Vertex AI client.
|
| 3752 |
"""
|
|
@@ -5697,6 +5847,323 @@ except Exception as e:
|
|
| 5697 |
print(f"Error: {e}")
|
| 5698 |
|
| 5699 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5700 |
================================================================================
|
| 5701 |
FILE: e:\madhuri\mathminds\tests\test_hashing.py
|
| 5702 |
================================================================================
|
|
@@ -6104,7 +6571,7 @@ async def test_orchestrator_routes_model_preference():
|
|
| 6104 |
mock_solver.solve.assert_called_with(
|
| 6105 |
"Solve this",
|
| 6106 |
image_data=None,
|
| 6107 |
-
model_name="gemini-
|
| 6108 |
)
|
| 6109 |
|
| 6110 |
print("\n[PASS] Orchestrator Routing Test Passed!")
|
|
@@ -6447,6 +6914,81 @@ if __name__ == '__main__':
|
|
| 6447 |
unittest.main()
|
| 6448 |
|
| 6449 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 6450 |
================================================================================
|
| 6451 |
FILE: e:\madhuri\mathminds\tests\test_smart_routing.py
|
| 6452 |
================================================================================
|
|
|
|
| 1 |
|
| 2 |
+
================================================================================
|
| 3 |
+
FILE: e:\madhuri\mathminds\check_agent.py
|
| 4 |
+
================================================================================
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
try:
|
| 8 |
+
from google.adk.agents import Agent
|
| 9 |
+
print("Agent class found in google.adk.agents")
|
| 10 |
+
except ImportError:
|
| 11 |
+
print("Agent class NOT found in google.adk.agents")
|
| 12 |
+
|
| 13 |
+
try:
|
| 14 |
+
from google.adk.agents import LlmAgent
|
| 15 |
+
print("LlmAgent class found in google.adk.agents")
|
| 16 |
+
except ImportError:
|
| 17 |
+
print("LlmAgent class NOT found in google.adk.agents")
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
================================================================================
|
| 21 |
+
FILE: e:\madhuri\mathminds\debug_adk.py
|
| 22 |
+
================================================================================
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
import sys
|
| 26 |
+
try:
|
| 27 |
+
import google
|
| 28 |
+
print("google imported")
|
| 29 |
+
print(dir(google))
|
| 30 |
+
|
| 31 |
+
try:
|
| 32 |
+
import google.adk
|
| 33 |
+
print("google.adk imported")
|
| 34 |
+
print(dir(google.adk))
|
| 35 |
+
except ImportError as e:
|
| 36 |
+
print(f"Failed to import google.adk: {e}")
|
| 37 |
+
|
| 38 |
+
try:
|
| 39 |
+
from google import adk
|
| 40 |
+
print("from google import adk succeeded")
|
| 41 |
+
print(dir(adk))
|
| 42 |
+
except ImportError as e:
|
| 43 |
+
print(f"Failed to from google import adk: {e}")
|
| 44 |
+
|
| 45 |
+
except ImportError as e:
|
| 46 |
+
print(f"Failed to import google: {e}")
|
| 47 |
+
|
| 48 |
+
|
| 49 |
================================================================================
|
| 50 |
FILE: e:\madhuri\mathminds\debug_env.py
|
| 51 |
================================================================================
|
|
|
|
| 254 |
================================================================================
|
| 255 |
|
| 256 |
import os
|
|
|
|
|
|
|
| 257 |
from dotenv import load_dotenv
|
| 258 |
+
import google.generativeai as genai
|
| 259 |
|
| 260 |
load_dotenv()
|
| 261 |
+
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
|
| 262 |
|
| 263 |
+
print("Available models:")
|
| 264 |
+
for model in genai.list_models():
|
| 265 |
+
if "gemini" in model.name:
|
| 266 |
+
print(f" {model.name:50} {model.display_name}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 267 |
|
| 268 |
================================================================================
|
| 269 |
FILE: e:\madhuri\mathminds\gunicorn_conf.py
|
|
|
|
| 301 |
proc_name = "mathminds_api"
|
| 302 |
|
| 303 |
|
| 304 |
+
================================================================================
|
| 305 |
+
FILE: e:\madhuri\mathminds\inspect_adk.py
|
| 306 |
+
================================================================================
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
import os
|
| 310 |
+
import sys
|
| 311 |
+
from dotenv import load_dotenv
|
| 312 |
+
|
| 313 |
+
# Add project root to path
|
| 314 |
+
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
| 315 |
+
|
| 316 |
+
try:
|
| 317 |
+
from google.adk.agents import LlmAgent
|
| 318 |
+
import inspect
|
| 319 |
+
|
| 320 |
+
print("=== LlmAgent.__init__ ===")
|
| 321 |
+
print(inspect.signature(LlmAgent.__init__))
|
| 322 |
+
print(LlmAgent.__init__.__doc__)
|
| 323 |
+
|
| 324 |
+
print("\n=== LlmAgent.run ===")
|
| 325 |
+
if hasattr(LlmAgent, 'run'):
|
| 326 |
+
print(inspect.signature(LlmAgent.run))
|
| 327 |
+
print(LlmAgent.run.__doc__)
|
| 328 |
+
else:
|
| 329 |
+
print("No run method")
|
| 330 |
+
|
| 331 |
+
print("\n=== google.adk.runners.Runner ===")
|
| 332 |
+
try:
|
| 333 |
+
from google.adk.runners import Runner
|
| 334 |
+
print(inspect.signature(Runner.__init__))
|
| 335 |
+
print(Runner.__init__.__doc__)
|
| 336 |
+
|
| 337 |
+
print("\n=== Runner.run_async ===")
|
| 338 |
+
if hasattr(Runner, 'run_async'):
|
| 339 |
+
print(inspect.signature(Runner.run_async))
|
| 340 |
+
print(Runner.run_async.__doc__)
|
| 341 |
+
except ImportError:
|
| 342 |
+
print("Could not import google.adk.runners.Runner")
|
| 343 |
+
|
| 344 |
+
print("\n=== google.adk.model.Model ===")
|
| 345 |
+
print("\n=== google.adk.sessions.in_memory_session_service.InMemorySessionService ===")
|
| 346 |
+
try:
|
| 347 |
+
from google.adk.sessions.in_memory_session_service import InMemorySessionService
|
| 348 |
+
print(dir(InMemorySessionService))
|
| 349 |
+
print(inspect.signature(InMemorySessionService.create_session))
|
| 350 |
+
except ImportError:
|
| 351 |
+
print("Could not import google.adk.sessions.in_memory_session_service.InMemorySessionService")
|
| 352 |
+
except Exception as e:
|
| 353 |
+
print(f"Error inspecting InMemorySessionService: {e}")
|
| 354 |
+
|
| 355 |
+
try:
|
| 356 |
+
import google.genai.types
|
| 357 |
+
print("\n=== google.genai.types ===")
|
| 358 |
+
print("google.genai.types found")
|
| 359 |
+
except ImportError:
|
| 360 |
+
print("Could not import google.genai.types")
|
| 361 |
+
|
| 362 |
+
except ImportError as e:
|
| 363 |
+
print(f"Failed to import: {e}")
|
| 364 |
+
except Exception as e:
|
| 365 |
+
print(f"Error: {e}")
|
| 366 |
+
|
| 367 |
+
|
| 368 |
+
================================================================================
|
| 369 |
+
FILE: e:\madhuri\mathminds\inspect_agent_class.py
|
| 370 |
+
================================================================================
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
import inspect
|
| 374 |
+
from google.adk.agents import Agent
|
| 375 |
+
|
| 376 |
+
print("=== Agent.__init__ ===")
|
| 377 |
+
print(inspect.signature(Agent.__init__))
|
| 378 |
+
print(Agent.__init__.__doc__)
|
| 379 |
+
|
| 380 |
+
print("\n=== Agent.run ===")
|
| 381 |
+
if hasattr(Agent, 'run'):
|
| 382 |
+
print(inspect.signature(Agent.run))
|
| 383 |
+
else:
|
| 384 |
+
print("No run method")
|
| 385 |
+
|
| 386 |
+
print("\n=== Agent.run_async ===")
|
| 387 |
+
if hasattr(Agent, 'run_async'):
|
| 388 |
+
print(inspect.signature(Agent.run_async))
|
| 389 |
+
else:
|
| 390 |
+
print("No run_async method")
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
================================================================================
|
| 394 |
+
FILE: e:\madhuri\mathminds\locate_adk_modules.py
|
| 395 |
+
================================================================================
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
import pkgutil
|
| 399 |
+
import google.adk
|
| 400 |
+
import importlib
|
| 401 |
+
|
| 402 |
+
def list_submodules(package, prefix):
|
| 403 |
+
print(f"package: {prefix}")
|
| 404 |
+
for loader, module_name, is_pkg in pkgutil.walk_packages(package.__path__, prefix + "."):
|
| 405 |
+
print(module_name)
|
| 406 |
+
if is_pkg:
|
| 407 |
+
try:
|
| 408 |
+
module = importlib.import_module(module_name)
|
| 409 |
+
# print(dir(module))
|
| 410 |
+
except Exception as e:
|
| 411 |
+
print(f"Failed to import {module_name}: {e}")
|
| 412 |
+
|
| 413 |
+
list_submodules(google.adk, "google.adk")
|
| 414 |
+
|
| 415 |
+
|
| 416 |
================================================================================
|
| 417 |
FILE: e:\madhuri\mathminds\reproduce_crash.py
|
| 418 |
================================================================================
|
|
|
|
| 603 |
run()
|
| 604 |
|
| 605 |
|
| 606 |
+
================================================================================
|
| 607 |
+
FILE: e:\madhuri\mathminds\test_tool_wrapping.py
|
| 608 |
+
================================================================================
|
| 609 |
+
|
| 610 |
+
|
| 611 |
+
from google.adk.agents import LlmAgent
|
| 612 |
+
from google.adk.tools import FunctionTool
|
| 613 |
+
|
| 614 |
+
def my_tool(x: int) -> int:
|
| 615 |
+
"""doubles x"""
|
| 616 |
+
return x * 2
|
| 617 |
+
|
| 618 |
+
try:
|
| 619 |
+
print("Trying to init LlmAgent with raw function...")
|
| 620 |
+
agent = LlmAgent(
|
| 621 |
+
name="test",
|
| 622 |
+
model="gemini-flash-latest",
|
| 623 |
+
instruction="test",
|
| 624 |
+
tools=[my_tool]
|
| 625 |
+
)
|
| 626 |
+
print("Success! LlmAgent accepted raw function.")
|
| 627 |
+
except Exception as e:
|
| 628 |
+
print(f"Failed with raw function: {e}")
|
| 629 |
+
|
| 630 |
+
try:
|
| 631 |
+
print("\nTrying to init LlmAgent with FunctionTool...")
|
| 632 |
+
agent = LlmAgent(
|
| 633 |
+
name="test",
|
| 634 |
+
model="gemini-flash-latest",
|
| 635 |
+
instruction="test",
|
| 636 |
+
tools=[FunctionTool(my_tool)]
|
| 637 |
+
)
|
| 638 |
+
print("Success! LlmAgent accepted FunctionTool.")
|
| 639 |
+
except Exception as e:
|
| 640 |
+
print(f"Failed with FunctionTool: {e}")
|
| 641 |
+
|
| 642 |
+
|
| 643 |
================================================================================
|
| 644 |
FILE: e:\madhuri\mathminds\verify_phase1.py
|
| 645 |
================================================================================
|
|
|
|
| 1030 |
|
| 1031 |
|
| 1032 |
================================================================================
|
| 1033 |
+
FILE: e:\madhuri\mathminds\app\agents\adk_mathminds.py
|
| 1034 |
================================================================================
|
| 1035 |
|
| 1036 |
+
|
| 1037 |
import logging
|
| 1038 |
import asyncio
|
| 1039 |
+
import base64
|
| 1040 |
+
from typing import Optional, Dict, Any, List
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1041 |
|
| 1042 |
+
from google.adk.agents import Agent
|
| 1043 |
+
from google.adk.runners import Runner
|
| 1044 |
+
from google.adk.sessions.in_memory_session_service import InMemorySessionService
|
| 1045 |
+
from google.genai import types
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1046 |
|
|
|
|
| 1047 |
from app.core.settings import settings
|
| 1048 |
+
from app.tools.web_scraper import WebScraper
|
| 1049 |
from app.tools.symbolic_solver import SymbolicSolver
|
| 1050 |
+
from app.tools.similarity_search import SimilarProblemFinder
|
| 1051 |
+
from app.core.math_normalizer import MathQueryNormalizer
|
|
|
|
| 1052 |
|
| 1053 |
logger = logging.getLogger(__name__)
|
| 1054 |
|
| 1055 |
+
class MathMindsADKAgent:
|
| 1056 |
"""
|
| 1057 |
+
Agent-based architecture using Google ADK (GitHub version).
|
| 1058 |
+
Refined to match official Multitool Agent documentation patterns.
|
| 1059 |
"""
|
| 1060 |
|
| 1061 |
+
    def __init__(self, model_name: str = "gemini-2.5-flash"):
        # The key is only warned about (not raised) so the app can still boot;
        # agent calls will fail later if it is missing.
        self.api_key = settings.GOOGLE_API_KEY
        if not self.api_key:
            logger.warning("No Google API Key found. Agent will fail.")

        # Initialize Tool Instances
        self.web_scraper = WebScraper(headless=True)
        self.symbolic_solver = SymbolicSolver()
        self.normalizer = MathQueryNormalizer()
        self.similar_finder = SimilarProblemFinder()

        # Define Tools as simpler closures
        # Docs pattern: simple functions, passed in a list.
        # NOTE(review): the docstrings below are presumably surfaced to the LLM
        # as tool descriptions by the ADK — keep them accurate; they are part of
        # runtime behavior, not just documentation.
        async def web_search(query: str) -> str:
            """
            Useful for finding current events, prices, weather, and general information from the internet.

            Args:
                query: The search query.
            """
            result = await self.web_scraper.scrape(query)
            if result.get("status") == "success":
                return result.get("content", "No content found.")
            else:
                # Tool errors are returned as strings so the LLM can react.
                return f"Error searching web: {result.get('error')}"

        def math_solver(problem: str) -> str:
            """
            Useful for solving symbolic math problems like equations, derivatives, integrals, and simplification.

            Args:
                problem: The math problem description or expression.
            """
            # Normalize free text into a structured query first; fall back to the
            # raw problem string when normalization returns a falsy result.
            intent = self.normalizer.normalize(problem)
            query_obj = intent if intent else problem
            result = self.symbolic_solver.solve(query_obj)

            if result.get("status") == "success":
                return result.get("content", "No solution found.")
            else:
                return f"Error solving math: {result.get('error')}"

        def find_similar_problems(query: str) -> str:
            """
            Useful for finding similar math problems and their solutions from the database to learn how they were solved.
            Use this when you are stuck or want to see examples.

            Args:
                query: The math problem to find similar examples for.
            """
            # Limit to 2 examples to keep the tool response small for the LLM.
            results = self.similar_finder.search(query, limit=2)
            if not results:
                return "No similar problems found."

            formatted = "Here are some similar problems and their solutions:\n"
            for item in results:
                formatted += f"Problem: {item.get('problem_text')}\nSolution: {item.get('solution_text')}\n---\n"
            return formatted

        # Initialize Agent
        # Using 'Agent' class as per official docs, passing functions directly.
        self.agent = Agent(
            name="math_minds_core",
            model=model_name,
            tools=[web_search, math_solver, find_similar_problems],  # Passed directly as function list
            instruction=(
                "You are MathMinds AI, a helpful and precise mathematical assistant. "
                "You have access to tools for solving symbolic math problems, searching the web, and finding similar solved problems. "
                "If an image is provided, analyze it mathematically. "
                "Use 'Math Solver' for distinct math problems (equations, calculus, etc.). "
                "Use 'Web Search' for real-world data (prices, weather, facts). "
                "Use 'Find Similar Problems' to look up examples if you are unsure how to solve a problem. "
                "Always explain your steps clearly."
            )
        )

        # Session Service — in-memory only, so conversation history is lost on
        # process restart and is not shared across workers.
        self.session_service = InMemorySessionService()

        # Runner — ties the agent to the session store under app name "mathminds"
        # (must match the app_name used in solve() below).
        self.runner = Runner(
            app_name="mathminds",
            agent=self.agent,
            session_service=self.session_service
        )

        logger.info("MathMindsADKAgent initialized successfully (Doc Standard).")
|
| 1148 |
|
| 1149 |
+
    async def solve(self, problem: str, image_data: Optional[str] = None, session_id: str = "default_session", user_id: str = "default_user") -> str:
        """
        Main entry point for the agent to solve a problem.

        Args:
            problem: The user's question or math problem (plain text).
            image_data: Optional base64-encoded image to attach to the request.
            session_id: Conversation identifier; distinct IDs keep separate
                histories in the session service (session isolation).
            user_id: Owner of the session, passed through to the ADK runner.

        Returns:
            The concatenated text of all agent response events, or an error
            string (this method never raises — see the outer except).
        """
        # IDs are now passed in, with fallbacks for backward compatibility

        try:
            # Ensure session exists (create if not found)
            try:
                existing = await self.session_service.get_session(
                    app_name="mathminds", session_id=session_id, user_id=user_id
                )
                if not existing:
                    await self.session_service.create_session(
                        app_name="mathminds", user_id=user_id, session_id=session_id
                    )
            except Exception:
                # get_session itself failed; best-effort create as a fallback.
                try:
                    await self.session_service.create_session(
                        app_name="mathminds", user_id=user_id, session_id=session_id
                    )
                except Exception as create_err:
                    # Non-fatal: the run below will surface any real session error.
                    logger.warning(f"Session creation issue (might already exist): {create_err}")

            # Construct Message Parts
            parts = []
            parts.append(types.Part.from_text(text=problem))

            if image_data:
                try:
                    img_bytes = base64.b64decode(image_data)
                    # Default to PNG; base64-encoded JPEG data starts with "/9j/"
                    # (the 0xFFD8 SOI marker), so sniff that case explicitly.
                    mime_type = "image/png"
                    if image_data.startswith("/9j/"):
                        mime_type = "image/jpeg"

                    parts.append(types.Part.from_bytes(data=img_bytes, mime_type=mime_type))
                    logger.info("Attached image to agent request.")
                except Exception as e:
                    # Degrade gracefully: tell the model the image was dropped
                    # instead of failing the whole request.
                    logger.error(f"Failed to process image data: {e}")
                    parts.append(types.Part.from_text(text="[Error: attached image could not be processed]"))

            # Execute Agent — stream events and accumulate all text parts.
            response_text = ""
            async for event in self.runner.run_async(
                user_id=user_id,
                session_id=session_id,
                new_message=types.Content(role="user", parts=parts)
            ):
                if event.content and event.content.parts:
                    for part in event.content.parts:
                        if part.text:
                            response_text += part.text

            return response_text
        except Exception as e:
            # Last-resort guard: callers always get a string, never an exception.
            logger.error(f"ADK Agent execution failed: {e}")
            return f"Error processing request: {str(e)}"
|
| 1207 |
|
| 1208 |
|
| 1209 |
================================================================================
|
|
|
|
| 2376 |
from app.tools.similarity_search import SimilarProblemFinder
|
| 2377 |
from app.core.math_normalizer import MathQueryNormalizer
|
| 2378 |
from app.core.settings import settings
|
| 2379 |
+
from app.agents.adk_mathminds import MathMindsADKAgent
|
| 2380 |
|
| 2381 |
logger = logging.getLogger(__name__)
|
| 2382 |
|
|
|
|
| 2412 |
self.similarity_finder = SimilarProblemFinder()
|
| 2413 |
self.math_normalizer = MathQueryNormalizer()
|
| 2414 |
|
| 2415 |
+
# Agents
|
| 2416 |
+
self.adk_agent = MathMindsADKAgent()
|
| 2417 |
+
|
| 2418 |
except Exception as e:
|
| 2419 |
logger.critical(f"Failed to initialize Orchestrator: {e}")
|
| 2420 |
raise
|
|
|
|
| 2496 |
result_schema["explanation"] = processed.error_message
|
| 2497 |
return self._finalize_result(result_schema, start_time)
|
| 2498 |
|
| 2499 |
+
# 2. Routing (Agent vs Deterministic)
|
| 2500 |
+
if model_preference == "agent":
|
| 2501 |
+
try:
|
| 2502 |
+
logger.info("Routing to Google ADK Agent")
|
| 2503 |
+
agent_res = await self.adk_agent.solve(
|
| 2504 |
+
processed.cleaned_content,
|
| 2505 |
+
processed.metadata.get("image_data"),
|
| 2506 |
+
session_id=session_id or "default_session",
|
| 2507 |
+
user_id=user_id or "default_user"
|
| 2508 |
+
)
|
| 2509 |
+
|
| 2510 |
+
# Agent returns a string usually, we need to wrap it
|
| 2511 |
+
result_schema["status"] = "success"
|
| 2512 |
+
result_schema["source"] = "google_adk_agent"
|
| 2513 |
+
result_schema["answer"] = agent_res
|
| 2514 |
+
result_schema["explanation"] = "Solved by AI Agent using tools (Google ADK)."
|
| 2515 |
+
result_schema["metadata"]["model"] = "gemini-flash-adk"
|
| 2516 |
+
|
| 2517 |
+
return self._finalize_result(result_schema, start_time)
|
| 2518 |
+
except Exception as e:
|
| 2519 |
+
logger.error(f"Agent failed: {e}")
|
| 2520 |
+
result_schema["error"] = str(e)
|
| 2521 |
+
# Fallback to standard flow? No, report error for explicit preference.
|
| 2522 |
+
return self._finalize_result(result_schema, start_time)
|
| 2523 |
+
|
| 2524 |
+
# 3. Hashing & Cache
|
| 2525 |
image_data = processed.metadata.get("image_data")
|
| 2526 |
p_hash = generate_problem_hash(processed.cleaned_content, image_data)
|
| 2527 |
lock_acquired = False
|
|
|
|
| 2677 |
prompt = f"{user_context_str}\n\nProblem: {processed.cleaned_content}"
|
| 2678 |
|
| 2679 |
gem_res = await self._safe_llm_call(prompt)
|
| 2680 |
+
self._populate_success(result_schema, gem_res, "gemini-2.5-flash")
|
| 2681 |
|
| 2682 |
# 5. Save & Index
|
| 2683 |
if result_schema["status"] == "success":
|
|
|
|
| 3896 |
Offers enterprise features, higher quotas, and better monitoring than the standard API.
|
| 3897 |
"""
|
| 3898 |
|
| 3899 |
+
def __init__(self, project_id: str = None, location: str = "us-central1", model_name: str = "gemini-2.5-flash"):
|
| 3900 |
"""
|
| 3901 |
Initialize Vertex AI client.
|
| 3902 |
"""
|
|
|
|
| 5847 |
print(f"Error: {e}")
|
| 5848 |
|
| 5849 |
|
| 5850 |
+
================================================================================
|
| 5851 |
+
FILE: e:\madhuri\mathminds\tests\test_adk_agent.py
|
| 5852 |
+
================================================================================
|
| 5853 |
+
|
| 5854 |
+
|
| 5855 |
+
import asyncio
|
| 5856 |
+
import os
|
| 5857 |
+
import logging
|
| 5858 |
+
from dotenv import load_dotenv
|
| 5859 |
+
|
| 5860 |
+
# Load environment variables
|
| 5861 |
+
load_dotenv()
|
| 5862 |
+
|
| 5863 |
+
# Configure logging
|
| 5864 |
+
logging.basicConfig(level=logging.INFO)
|
| 5865 |
+
logger = logging.getLogger(__name__)
|
| 5866 |
+
|
| 5867 |
+
# Import Google ADK components
|
| 5868 |
+
from google.adk.agents.llm_agent import LlmAgent
|
| 5869 |
+
from google.adk.models.google_llm import Gemini
|
| 5870 |
+
from google.adk.tools.function_tool import FunctionTool
|
| 5871 |
+
from google.adk.runners import Runner
|
| 5872 |
+
from google.adk.sessions.in_memory_session_service import InMemorySessionService
|
| 5873 |
+
from google.genai import types
|
| 5874 |
+
|
| 5875 |
+
# Define a simple tool
|
| 5876 |
+
def calculator(a: int, b: int, op: str) -> int:
    """Performs simple arithmetic operations.

    Args:
        a: The first number.
        b: The second number.
        op: The operation ('+', '-', '*', '/').
    """
    logging.getLogger(__name__).info(f"Calculator called with a={a}, b={b}, op={op}")
    # Dispatch table instead of an if/elif chain; lambdas keep evaluation lazy
    # so '/' only divides when actually selected.
    operations = {
        '+': lambda: a + b,
        '-': lambda: a - b,
        '*': lambda: a * b,
        '/': lambda: int(a / b),
    }
    action = operations.get(op)
    # Unknown operators fall through to 0, matching the original contract.
    return action() if action is not None else 0
|
| 5894 |
+
|
| 5895 |
+
async def main():
    """Smoke-test the raw Google ADK stack: model -> tool -> agent -> runner.

    Requires GOOGLE_API_KEY in the environment (loaded via .env at module top).
    Prints partial text, tool calls, and tool results as events stream in.
    """
    print("Initializing Google ADK Agent...")

    # 1. Create Model
    # Ensure GOOGLE_API_KEY is set in .env
    model = Gemini(model="gemini-2.5-flash")

    # 2. Create Tools
    calc_tool = FunctionTool(calculator)

    # 3. Create Agent
    agent = LlmAgent(
        name="math_helper",
        model=model,
        tools=[calc_tool],
        instruction="You are a helpful math assistant. Use the calculator tool for computations."
    )

    # 4. Create Services
    session_service = InMemorySessionService()

    # 5. Create Runner
    runner = Runner(
        app_name="mathminds_adk_test",
        agent=agent,
        session_service=session_service
    )

    # 6. Run Agent
    user_id = "test_user"
    session_id = "test_session"
    prompt = "Calculate 15 * 12 then add 50."

    print(f"\nUser: {prompt}")

    # Creating a new session explicitly if needed, but runner might handle it.
    # Runner.run requires session to exist? define user_id and session_id.
    # InMemorySessionService usually auto-creates if logic allows, checking Runner code...
    # Runner.run check: session_service.get_session returns None -> ValueError "Session not found"
    # So we must create session first.

    session = await session_service.create_session(
        app_name="mathminds_adk_test",
        user_id=user_id,
        session_id=session_id
    )

    print("Session created. Running agent...")

    # Using run_async for better control
    response_text = ""
    async for event in runner.run_async(
        user_id=user_id,
        session_id=session_id,
        new_message=types.Content(role="user", parts=[types.Part.from_text(text=prompt)])
    ):
        # Inspect event types: each event part may carry text, a tool call,
        # or a tool result — print whichever is present.
        if event.content and event.content.parts:
            for part in event.content.parts:
                if part.text:
                    print(f"Agent partial: {part.text}")
                    response_text += part.text
                if part.function_call:
                    print(f"Tool Call: {part.function_call.name}({part.function_call.args})")
                if part.function_response:
                    print(f"Tool Result: {part.function_response.response}")

    print(f"\nFinal Response: {response_text}")

if __name__ == "__main__":
    asyncio.run(main())
|
| 5966 |
+
|
| 5967 |
+
|
| 5968 |
+
================================================================================
|
| 5969 |
+
FILE: e:\madhuri\mathminds\tests\test_adk_full.py
|
| 5970 |
+
================================================================================
|
| 5971 |
+
|
| 5972 |
+
|
| 5973 |
+
import asyncio
|
| 5974 |
+
import os
|
| 5975 |
+
import logging
|
| 5976 |
+
from dotenv import load_dotenv
|
| 5977 |
+
|
| 5978 |
+
# Load environment variables
|
| 5979 |
+
load_dotenv()
|
| 5980 |
+
|
| 5981 |
+
# Configure logging
|
| 5982 |
+
logging.basicConfig(level=logging.INFO)
|
| 5983 |
+
logger = logging.getLogger(__name__)
|
| 5984 |
+
|
| 5985 |
+
import sys
|
| 5986 |
+
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
| 5987 |
+
|
| 5988 |
+
# Import the new agent
|
| 5989 |
+
from app.agents.adk_mathminds import MathMindsADKAgent
|
| 5990 |
+
|
| 5991 |
+
async def main():
    """End-to-end smoke test of MathMindsADKAgent (math tool + web search).

    Makes live LLM calls; any failure is logged rather than raised.
    """
    print("Initializing MathMinds ADK Agent...")

    try:
        agent = MathMindsADKAgent()

        # Test 1: Math Problem — should exercise the symbolic math tool.
        prompt_math = "Calculate the derivative of x^2 + 5x."
        print(f"\nUser: {prompt_math}")
        response_math = await agent.solve(prompt_math)
        print(f"Agent Response: {response_math}")

        # Test 2: General Knowledge (Web Search)
        # Note: Web scraping might fail if not configured or blocked, but agent should handle it gracefully.
        prompt_search = "What is the capital of France?"
        print(f"\nUser: {prompt_search}")
        response_search = await agent.solve(prompt_search)
        print(f"Agent Response: {response_search}")

    except Exception as e:
        logger.error(f"Test failed: {e}")

if __name__ == "__main__":
    asyncio.run(main())
|
| 6015 |
+
|
| 6016 |
+
|
| 6017 |
+
================================================================================
|
| 6018 |
+
FILE: e:\madhuri\mathminds\tests\test_adk_poc.py
|
| 6019 |
+
================================================================================
|
| 6020 |
+
|
| 6021 |
+
|
| 6022 |
+
import asyncio
|
| 6023 |
+
import os
|
| 6024 |
+
import sys
|
| 6025 |
+
import logging
|
| 6026 |
+
from dotenv import load_dotenv
|
| 6027 |
+
|
| 6028 |
+
# Fix path
|
| 6029 |
+
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
| 6030 |
+
load_dotenv()
|
| 6031 |
+
|
| 6032 |
+
logging.basicConfig(level=logging.INFO)
|
| 6033 |
+
|
| 6034 |
+
try:
|
| 6035 |
+
from google.adk.agents import LlmAgent
|
| 6036 |
+
from google.adk.runners import Runner
|
| 6037 |
+
from google.adk.sessions.in_memory_session_service import InMemorySessionService
|
| 6038 |
+
from google.genai import types
|
| 6039 |
+
except ImportError as e:
|
| 6040 |
+
print(f"Import Error: {e}")
|
| 6041 |
+
sys.exit(1)
|
| 6042 |
+
|
| 6043 |
+
async def main():
    """Proof-of-concept: minimal LlmAgent + Runner round trip with verbose
    event dumps, used to discover the ADK API surface experimentally."""
    print("Initializing components...")

    # 1. Agent
    # Note: LlmAgent arguments might strictly be pydantic fields.
    # If 'model' is expected to be a string alias, this works.
    agent = LlmAgent(
        name="MathTest",
        model="gemini-2.5-flash",
        instruction="You are a helpful math assistant."
    )

    # 2. Session Service
    session_service = InMemorySessionService()

    # 3. Runner
    # Assuming Runner takes agent and session_service.
    # If it needs 'model' client explicitly, we might fail here.
    try:
        runner = Runner(agent=agent, app_name="MathMindsPoC", session_service=session_service)
    except TypeError as e:
        print(f"Runner init failed: {e}")
        # Maybe it takes arguments differently?
        return

    print("Setting up session...")
    session_id = "poc_session"
    user_id = "poc_user"

    # Sessions must exist before runner.run_async is called for them.
    try:
        await session_service.create_session(
            app_name="MathMindsPoC",
            user_id=user_id,
            session_id=session_id
        )
    except Exception as e:
        print(f"Session creation failed: {e}")
        return

    print("Running agent...")

    try:
        async for event in runner.run_async(
            user_id=user_id,
            session_id=session_id,
            new_message=types.Content(
                role="user",
                parts=[types.Part.from_text(text="What is 10 * 10?")]
            )
        ):
            # Print the event to see structure
            print(f"Event: {type(event)}")
            if hasattr(event, 'content') and event.content:
                for part in event.content.parts:
                    if part.text:
                        print(f"Text: {part.text}")

    except Exception as e:
        print(f"Run Error: {e}")
        import traceback
        traceback.print_exc()

if __name__ == "__main__":
    asyncio.run(main())
|
| 6107 |
+
|
| 6108 |
+
|
| 6109 |
+
================================================================================
|
| 6110 |
+
FILE: e:\madhuri\mathminds\tests\test_api_integration.py
|
| 6111 |
+
================================================================================
|
| 6112 |
+
|
| 6113 |
+
|
| 6114 |
+
import asyncio
|
| 6115 |
+
import os
|
| 6116 |
+
import sys
|
| 6117 |
+
import logging
|
| 6118 |
+
from dotenv import load_dotenv
|
| 6119 |
+
|
| 6120 |
+
# Fix path
|
| 6121 |
+
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
| 6122 |
+
|
| 6123 |
+
# Load environment variables
|
| 6124 |
+
load_dotenv()
|
| 6125 |
+
|
| 6126 |
+
# Configure logging
|
| 6127 |
+
logging.basicConfig(level=logging.INFO)
|
| 6128 |
+
logger = logging.getLogger(__name__)
|
| 6129 |
+
|
| 6130 |
+
from app.core.orchestrator import Orchestrator
|
| 6131 |
+
|
| 6132 |
+
async def main():
    """Integration check: model_preference='agent' must route through the
    Orchestrator to the Google ADK agent (source == 'google_adk_agent')."""
    print("Initializing Orchestrator...")
    try:
        orchestrator = Orchestrator()

        # Test: Arithmetic via Agent
        prompt = "Calculate 15 * 3."
        print(f"\nUser: {prompt} (Preference: agent)")

        result = await orchestrator.process_problem(
            text=prompt,
            model_preference="agent",
            request_id="test_req_1"
        )

        print("\nResult:")
        print(f"Status: {result.get('status')}")
        print(f"Source: {result.get('source')}")
        print(f"Answer: {result.get('answer')}")
        print(f"Model: {result.get('metadata', {}).get('model')}")

        # Routing verdict is based on the 'source' field set by the orchestrator.
        if result.get("source") == "google_adk_agent":
            print("\nSUCCESS: Routed to Google ADK Agent.")
        else:
            print("\nFAILURE: Did not route to Google ADK Agent.")

    except Exception as e:
        logger.error(f"Test failed: {e}")
        import traceback
        traceback.print_exc()

if __name__ == "__main__":
    asyncio.run(main())
|
| 6165 |
+
|
| 6166 |
+
|
| 6167 |
================================================================================
|
| 6168 |
FILE: e:\madhuri\mathminds\tests\test_hashing.py
|
| 6169 |
================================================================================
|
|
|
|
| 6571 |
mock_solver.solve.assert_called_with(
|
| 6572 |
"Solve this",
|
| 6573 |
image_data=None,
|
| 6574 |
+
model_name="gemini-2.5-flash"
|
| 6575 |
)
|
| 6576 |
|
| 6577 |
print("\n[PASS] Orchestrator Routing Test Passed!")
|
|
|
|
| 6914 |
unittest.main()
|
| 6915 |
|
| 6916 |
|
| 6917 |
+
================================================================================
|
| 6918 |
+
FILE: e:\madhuri\mathminds\tests\test_session_isolation.py
|
| 6919 |
+
================================================================================
|
| 6920 |
+
|
| 6921 |
+
|
| 6922 |
+
import sys
|
| 6923 |
+
import os
|
| 6924 |
+
sys.path.insert(0, os.getcwd())
|
| 6925 |
+
|
| 6926 |
+
import pytest
|
| 6927 |
+
import asyncio
|
| 6928 |
+
from app.agents.adk_mathminds import MathMindsADKAgent
|
| 6929 |
+
|
| 6930 |
+
@pytest.mark.asyncio
async def test_session_isolation():
    """
    Verifies that the ADK agent maintains separate conversation histories
    for different session IDs.

    NOTE: this is a live-LLM test — the assertions depend on the model
    actually recalling the seeded names, so it can be flaky offline or
    without a valid API key.
    """
    agent = MathMindsADKAgent()

    # Same user, two distinct sessions: history must not bleed between them.
    user_id = "test_user_isolation"
    session_a = "session_A"
    session_b = "session_B"

    print("\n--- Starting Session Isolation Test ---")

    # 1. Seed Session A
    print(f"Seeding {session_a} with context 'My name is Alice'...")
    resp_a1 = await agent.solve(
        problem="My name is Alice. Remember this.",
        session_id=session_a,
        user_id=user_id
    )
    print(f"Agent response (A1): {resp_a1}")

    # 2. Seed Session B
    print(f"Seeding {session_b} with context 'My name is Bob'...")
    resp_b1 = await agent.solve(
        problem="My name is Bob. Remember this.",
        session_id=session_b,
        user_id=user_id
    )
    print(f"Agent response (B1): {resp_b1}")

    # 3. Query Session A
    print(f"Querying {session_a} for name...")
    resp_a2 = await agent.solve(
        problem="What is my name?",
        session_id=session_a,
        user_id=user_id
    )
    print(f"Agent response (A2): {resp_a2}")

    # 4. Query Session B
    print(f"Querying {session_b} for name...")
    resp_b2 = await agent.solve(
        problem="What is my name?",
        session_id=session_b,
        user_id=user_id
    )
    print(f"Agent response (B2): {resp_b2}")

    # Assertions: each session recalls its own name and NOT the other's.
    assert "Alice" in resp_a2, f"Session A failed to remember Alice. Got: {resp_a2}"
    assert "Bob" in resp_b2, f"Session B failed to remember Bob. Got: {resp_b2}"
    assert "Bob" not in resp_a2, "Session A leaked context from Session B!"
    assert "Alice" not in resp_b2, "Session B leaked context from Session A!"

    print("\nSUCCESS: Sessions are isolated correctly!")

if __name__ == "__main__":
    asyncio.run(test_session_isolation())
|
| 6990 |
+
|
| 6991 |
+
|
| 6992 |
================================================================================
|
| 6993 |
FILE: e:\madhuri\mathminds\tests\test_smart_routing.py
|
| 6994 |
================================================================================
|
app/agents/adk_mathminds.py
CHANGED
|
@@ -12,6 +12,7 @@ from google.genai import types
|
|
| 12 |
from app.core.settings import settings
|
| 13 |
from app.tools.web_scraper import WebScraper
|
| 14 |
from app.tools.symbolic_solver import SymbolicSolver
|
|
|
|
| 15 |
from app.core.math_normalizer import MathQueryNormalizer
|
| 16 |
|
| 17 |
logger = logging.getLogger(__name__)
|
|
@@ -31,6 +32,7 @@ class MathMindsADKAgent:
|
|
| 31 |
self.web_scraper = WebScraper(headless=True)
|
| 32 |
self.symbolic_solver = SymbolicSolver()
|
| 33 |
self.normalizer = MathQueryNormalizer()
|
|
|
|
| 34 |
|
| 35 |
# Define Tools as simpler closures
|
| 36 |
# Docs pattern: simple functions, passed in a list.
|
|
@@ -63,18 +65,36 @@ class MathMindsADKAgent:
|
|
| 63 |
else:
|
| 64 |
return f"Error solving math: {result.get('error')}"
|
| 65 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 66 |
# Initialize Agent
|
| 67 |
# Using 'Agent' class as per official docs, passing functions directly.
|
| 68 |
self.agent = Agent(
|
| 69 |
name="math_minds_core",
|
| 70 |
model=model_name,
|
| 71 |
-
tools=[web_search, math_solver], # Passed directly as function list
|
| 72 |
instruction=(
|
| 73 |
"You are MathMinds AI, a helpful and precise mathematical assistant. "
|
| 74 |
-
"You have access to tools for solving symbolic math problems
|
| 75 |
"If an image is provided, analyze it mathematically. "
|
| 76 |
"Use 'Math Solver' for distinct math problems (equations, calculus, etc.). "
|
| 77 |
"Use 'Web Search' for real-world data (prices, weather, facts). "
|
|
|
|
| 78 |
"Always explain your steps clearly."
|
| 79 |
)
|
| 80 |
)
|
|
@@ -91,13 +111,12 @@ class MathMindsADKAgent:
|
|
| 91 |
|
| 92 |
logger.info("MathMindsADKAgent initialized successfully (Doc Standard).")
|
| 93 |
|
| 94 |
-
async def solve(self, problem: str, image_data: Optional[str] = None) -> str:
|
| 95 |
"""
|
| 96 |
Main entry point for the agent to solve a problem.
|
| 97 |
"""
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
try:
|
| 102 |
# Ensure session exists (create if not found)
|
| 103 |
try:
|
|
|
|
| 12 |
from app.core.settings import settings
|
| 13 |
from app.tools.web_scraper import WebScraper
|
| 14 |
from app.tools.symbolic_solver import SymbolicSolver
|
| 15 |
+
from app.tools.similarity_search import SimilarProblemFinder
|
| 16 |
from app.core.math_normalizer import MathQueryNormalizer
|
| 17 |
|
| 18 |
logger = logging.getLogger(__name__)
|
|
|
|
| 32 |
self.web_scraper = WebScraper(headless=True)
|
| 33 |
self.symbolic_solver = SymbolicSolver()
|
| 34 |
self.normalizer = MathQueryNormalizer()
|
| 35 |
+
self.similar_finder = SimilarProblemFinder()
|
| 36 |
|
| 37 |
# Define Tools as simpler closures
|
| 38 |
# Docs pattern: simple functions, passed in a list.
|
|
|
|
| 65 |
else:
|
| 66 |
return f"Error solving math: {result.get('error')}"
|
| 67 |
|
| 68 |
+
def find_similar_problems(query: str) -> str:
|
| 69 |
+
"""
|
| 70 |
+
Useful for finding similar math problems and their solutions from the database to learn how they were solved.
|
| 71 |
+
Use this when you are stuck or want to see examples.
|
| 72 |
+
|
| 73 |
+
Args:
|
| 74 |
+
query: The math problem to find similar examples for.
|
| 75 |
+
"""
|
| 76 |
+
results = self.similar_finder.search(query, limit=2)
|
| 77 |
+
if not results:
|
| 78 |
+
return "No similar problems found."
|
| 79 |
+
|
| 80 |
+
formatted = "Here are some similar problems and their solutions:\n"
|
| 81 |
+
for item in results:
|
| 82 |
+
formatted += f"Problem: {item.get('problem_text')}\nSolution: {item.get('solution_text')}\n---\n"
|
| 83 |
+
return formatted
|
| 84 |
+
|
| 85 |
# Initialize Agent
|
| 86 |
# Using 'Agent' class as per official docs, passing functions directly.
|
| 87 |
self.agent = Agent(
|
| 88 |
name="math_minds_core",
|
| 89 |
model=model_name,
|
| 90 |
+
tools=[web_search, math_solver, find_similar_problems], # Passed directly as function list
|
| 91 |
instruction=(
|
| 92 |
"You are MathMinds AI, a helpful and precise mathematical assistant. "
|
| 93 |
+
"You have access to tools for solving symbolic math problems, searching the web, and finding similar solved problems. "
|
| 94 |
"If an image is provided, analyze it mathematically. "
|
| 95 |
"Use 'Math Solver' for distinct math problems (equations, calculus, etc.). "
|
| 96 |
"Use 'Web Search' for real-world data (prices, weather, facts). "
|
| 97 |
+
"Use 'Find Similar Problems' to look up examples if you are unsure how to solve a problem. "
|
| 98 |
"Always explain your steps clearly."
|
| 99 |
)
|
| 100 |
)
|
|
|
|
| 111 |
|
| 112 |
logger.info("MathMindsADKAgent initialized successfully (Doc Standard).")
|
| 113 |
|
| 114 |
+
async def solve(self, problem: str, image_data: Optional[str] = None, session_id: str = "default_session", user_id: str = "default_user") -> str:
|
| 115 |
"""
|
| 116 |
Main entry point for the agent to solve a problem.
|
| 117 |
"""
|
| 118 |
+
# IDs are now passed in, with fallbacks for backward compatibility
|
| 119 |
+
|
|
|
|
| 120 |
try:
|
| 121 |
# Ensure session exists (create if not found)
|
| 122 |
try:
|
app/core/orchestrator.py
CHANGED
|
@@ -1,41 +1,20 @@
|
|
|
|
|
| 1 |
import logging
|
| 2 |
import time
|
| 3 |
-
import
|
| 4 |
-
import re
|
| 5 |
-
from typing import Any, Dict, Optional, List
|
| 6 |
-
from enum import Enum
|
| 7 |
|
| 8 |
from app.core.input_processor import InputProcessor
|
| 9 |
from app.memory.cache import CacheManager
|
| 10 |
from app.memory.database import DatabaseManager
|
| 11 |
-
from app.models.gemini import GeminiModel
|
| 12 |
-
from app.models.qwen import QwenModel
|
| 13 |
-
from app.utils.hashing import generate_problem_hash
|
| 14 |
-
from app.validation.answer_checker import AnswerValidator
|
| 15 |
-
from app.tools.web_scraper import WebScraper
|
| 16 |
-
from app.worker import scrape_web_task # Celery Task
|
| 17 |
-
from app.tools.symbolic_solver import SymbolicSolver
|
| 18 |
-
from app.tools.vision_analyzer import VisionAnalyzer
|
| 19 |
-
from app.tools.similarity_search import SimilarProblemFinder
|
| 20 |
-
from app.core.math_normalizer import MathQueryNormalizer
|
| 21 |
-
from app.core.settings import settings
|
| 22 |
from app.agents.adk_mathminds import MathMindsADKAgent
|
|
|
|
| 23 |
|
| 24 |
logger = logging.getLogger(__name__)
|
| 25 |
|
| 26 |
-
# --- Intent Enums ---
|
| 27 |
-
class IntentType(Enum):
|
| 28 |
-
ARITHMETIC = "arithmetic"
|
| 29 |
-
SYMBOLIC_MATH = "symbolic_math"
|
| 30 |
-
VISION = "vision"
|
| 31 |
-
SEARCH = "search"
|
| 32 |
-
CONCEPTUAL = "conceptual"
|
| 33 |
-
UNKNOWN = "unknown"
|
| 34 |
-
|
| 35 |
class Orchestrator:
|
| 36 |
"""
|
| 37 |
-
|
| 38 |
-
|
| 39 |
"""
|
| 40 |
|
| 41 |
def __init__(self, cache_manager: Optional[CacheManager] = None, db_manager: Optional[DatabaseManager] = None):
|
|
@@ -44,82 +23,25 @@ class Orchestrator:
|
|
| 44 |
self.cache_manager = cache_manager or CacheManager()
|
| 45 |
self.db_manager = db_manager or DatabaseManager()
|
| 46 |
|
| 47 |
-
#
|
| 48 |
-
self.gemini = GeminiModel()
|
| 49 |
-
self.qwen = QwenModel()
|
| 50 |
-
|
| 51 |
-
# Tools
|
| 52 |
-
self.web_scraper = WebScraper()
|
| 53 |
-
self.symbolic_solver = SymbolicSolver()
|
| 54 |
-
self.vision_analyzer = VisionAnalyzer()
|
| 55 |
-
self.similarity_finder = SimilarProblemFinder()
|
| 56 |
-
self.math_normalizer = MathQueryNormalizer()
|
| 57 |
-
|
| 58 |
-
# Agents
|
| 59 |
self.adk_agent = MathMindsADKAgent()
|
| 60 |
|
| 61 |
except Exception as e:
|
| 62 |
logger.critical(f"Failed to initialize Orchestrator: {e}")
|
| 63 |
raise
|
| 64 |
|
| 65 |
-
def _classify_intent(self, text: str, has_image: bool) -> IntentType:
|
| 66 |
-
"""
|
| 67 |
-
Fast, rule-based intent classifier. No LLM used here.
|
| 68 |
-
"""
|
| 69 |
-
if has_image:
|
| 70 |
-
return IntentType.VISION
|
| 71 |
-
|
| 72 |
-
if not text:
|
| 73 |
-
return IntentType.UNKNOWN
|
| 74 |
-
|
| 75 |
-
text = text.lower().strip()
|
| 76 |
-
|
| 77 |
-
# 1. Search Intent
|
| 78 |
-
if any(w in text for w in ["price of", "news", "latest", "who is", "weather", "stock", "search for"]):
|
| 79 |
-
return IntentType.SEARCH
|
| 80 |
-
|
| 81 |
-
# 2. Arithmetic (Target Symbolic)
|
| 82 |
-
# Check if purely numbers/operators/basic math keywords
|
| 83 |
-
if re.match(r'^[\d\s\+\-\*\/\^\(\)\.\=]+$', text):
|
| 84 |
-
return IntentType.ARITHMETIC
|
| 85 |
-
|
| 86 |
-
# 3. Symbolic Math (Target Symbolic)
|
| 87 |
-
math_keywords = ["solve", "integrate", "derive", "derivative", "limit", "sum", "simplify", "factor", "equation", "latex"]
|
| 88 |
-
if any(w in text for w in math_keywords) or "=" in text or "\\" in text: # Latex has backslashes
|
| 89 |
-
return IntentType.SYMBOLIC_MATH
|
| 90 |
-
|
| 91 |
-
# 4. Conceptual/General (Target LLM)
|
| 92 |
-
return IntentType.CONCEPTUAL
|
| 93 |
-
|
| 94 |
async def process_problem(self, text: Optional[str] = None, image: Optional[str] = None, request_id: Optional[str] = None, model_preference: str = "fast", session_id: Optional[str] = None, user_id: Optional[str] = None) -> Dict[str, Any]:
|
| 95 |
"""
|
| 96 |
-
|
| 97 |
"""
|
| 98 |
start_time = time.time()
|
| 99 |
request_id = request_id or "unknown"
|
| 100 |
|
| 101 |
-
|
| 102 |
-
# --- User Context ---
|
| 103 |
-
user_context_str = ""
|
| 104 |
-
if user_id:
|
| 105 |
-
try:
|
| 106 |
-
profile = self.db_manager.get_user_profile(user_id)
|
| 107 |
-
if profile:
|
| 108 |
-
level = profile.get("math_level", "Student")
|
| 109 |
-
interests = ", ".join(profile.get("interests", []))
|
| 110 |
-
user_context_str = f"User Profile: {level} level."
|
| 111 |
-
if interests:
|
| 112 |
-
user_context_str += f" Interests: {interests}."
|
| 113 |
-
user_context_str += " Adjust explanation complexity to match this level."
|
| 114 |
-
except Exception as e:
|
| 115 |
-
logger.warning(f"Failed to fetch profile in orchestrator: {e}")
|
| 116 |
-
|
| 117 |
-
# --- Strict Output Schema ---
|
| 118 |
result_schema = {
|
| 119 |
"request_id": request_id,
|
| 120 |
"status": "error",
|
| 121 |
-
"
|
| 122 |
-
"source": "unknown",
|
| 123 |
"answer": None,
|
| 124 |
"steps": [],
|
| 125 |
"explanation": None,
|
|
@@ -127,7 +49,7 @@ class Orchestrator:
|
|
| 127 |
"cached": False,
|
| 128 |
"metadata": {
|
| 129 |
"latency_ms": 0,
|
| 130 |
-
"model":
|
| 131 |
"tools_used": []
|
| 132 |
}
|
| 133 |
}
|
|
@@ -139,259 +61,52 @@ class Orchestrator:
|
|
| 139 |
result_schema["explanation"] = processed.error_message
|
| 140 |
return self._finalize_result(result_schema, start_time)
|
| 141 |
|
| 142 |
-
# 2.
|
| 143 |
-
|
| 144 |
-
try:
|
| 145 |
-
logger.info("Routing to Google ADK Agent")
|
| 146 |
-
agent_res = await self.adk_agent.solve(processed.cleaned_content, processed.metadata.get("image_data"))
|
| 147 |
-
|
| 148 |
-
# Agent returns a string usually, we need to wrap it
|
| 149 |
-
result_schema["status"] = "success"
|
| 150 |
-
result_schema["source"] = "google_adk_agent"
|
| 151 |
-
result_schema["answer"] = agent_res
|
| 152 |
-
result_schema["explanation"] = "Solved by AI Agent using tools (Google ADK)."
|
| 153 |
-
result_schema["metadata"]["model"] = "gemini-flash-adk"
|
| 154 |
-
|
| 155 |
-
return self._finalize_result(result_schema, start_time)
|
| 156 |
-
except Exception as e:
|
| 157 |
-
logger.error(f"Agent failed: {e}")
|
| 158 |
-
result_schema["error"] = str(e)
|
| 159 |
-
# Fallback to standard flow? No, report error for explicit preference.
|
| 160 |
-
return self._finalize_result(result_schema, start_time)
|
| 161 |
-
|
| 162 |
-
# 3. Hashing & Cache
|
| 163 |
-
image_data = processed.metadata.get("image_data")
|
| 164 |
-
p_hash = generate_problem_hash(processed.cleaned_content, image_data)
|
| 165 |
-
lock_acquired = False
|
| 166 |
-
lock_key = f"lock:{p_hash}"
|
| 167 |
-
|
| 168 |
-
if settings.ENABLE_CACHE:
|
| 169 |
-
cached = self.cache_manager.get_cached_answer(p_hash)
|
| 170 |
-
if cached:
|
| 171 |
-
# Hydrate schema from cache
|
| 172 |
-
result_schema.update(cached)
|
| 173 |
-
result_schema["status"] = "success"
|
| 174 |
-
result_schema["cached"] = True
|
| 175 |
-
result_schema["source"] = "cache"
|
| 176 |
-
return self._finalize_result(result_schema, start_time)
|
| 177 |
-
|
| 178 |
-
# --- CACHE STAMPEDE PROTECTION ---
|
| 179 |
-
# Try to acquire a lock to prevent multiple workers from solving the same problem
|
| 180 |
-
if self.cache_manager.redis_client:
|
| 181 |
-
# Try to acquire lock (set if not exists with 300s TTL)
|
| 182 |
-
is_locked = self.cache_manager.redis_client.set(lock_key, "locked", ex=300, nx=True)
|
| 183 |
-
|
| 184 |
-
if is_locked:
|
| 185 |
-
lock_acquired = True
|
| 186 |
-
else:
|
| 187 |
-
# Lock exists -> another process is working. Wait for it.
|
| 188 |
-
logger.info(f"Problem {p_hash[:8]} is being processed by another worker. Waiting...")
|
| 189 |
-
for _ in range(300): # Wait up to 60 seconds (300 * 0.2)
|
| 190 |
-
await asyncio.sleep(0.2)
|
| 191 |
-
# Check cache again
|
| 192 |
-
cached = self.cache_manager.get_cached_answer(p_hash)
|
| 193 |
-
if cached:
|
| 194 |
-
logger.debug("Cache populated while waiting. Returning result.") # Using debug to reduce noise
|
| 195 |
-
result_schema.update(cached)
|
| 196 |
-
result_schema["status"] = "success"
|
| 197 |
-
result_schema["cached"] = True
|
| 198 |
-
result_schema["source"] = "cache"
|
| 199 |
-
return self._finalize_result(result_schema, start_time)
|
| 200 |
-
|
| 201 |
-
# Timeout reached. One last check before giving up.
|
| 202 |
-
cached_final = self.cache_manager.get_cached_answer(p_hash)
|
| 203 |
-
if cached_final:
|
| 204 |
-
logger.info("Cache populated just in time. Returning result.")
|
| 205 |
-
result_schema.update(cached_final)
|
| 206 |
-
result_schema["status"] = "success"
|
| 207 |
-
result_schema["cached"] = True
|
| 208 |
-
result_schema["source"] = "cache"
|
| 209 |
-
return self._finalize_result(result_schema, start_time)
|
| 210 |
-
|
| 211 |
-
# Still nothing? Fail Open.
|
| 212 |
-
logger.warning(f"Timeout waiting for lock on {p_hash[:8]}. Proceeding to compute locally (Fail Open).")
|
| 213 |
-
# We proceed to solve it ourselves. lock_acquired is False, so we won't release the other worker's lock.
|
| 214 |
|
| 215 |
-
#
|
| 216 |
-
|
| 217 |
-
# ... existing logic follows ...
|
| 218 |
-
pass
|
| 219 |
-
except Exception:
|
| 220 |
-
raise
|
| 221 |
-
# Note: The 'finally' block to release lock needs to wrap the entire solve process.
|
| 222 |
-
# Since I can't easily wrap the *rest* of the function without indenting everything,
|
| 223 |
-
# I will release the lock explicitly before return points or use a flag.
|
| 224 |
-
# Actually, to generate correct code structure with replacement, I need to wrap the rest.
|
| 225 |
-
# ALTERNATIVE: I will insert the 'acquire' here, and handling 'release' might be tricky with ReplaceFileContent if I don't re-indent headers.
|
| 226 |
-
# Strategy: I'll use the 'lock' only for the Heavy LLM parts?
|
| 227 |
-
# No, strictly strictly, I should wrap.
|
| 228 |
-
# Let's simple release the lock at the end of the function.
|
| 229 |
-
# I'll enable a flag `self.has_lock = True` if I acquired it. And in `finally` of the whole block I release it.
|
| 230 |
-
# Wait, `process_problem` has a big `try/except`. I can use that.
|
| 231 |
-
|
| 232 |
-
|
| 233 |
-
# 3. Classification
|
| 234 |
-
has_image = bool(processed.metadata.get("image_data"))
|
| 235 |
-
image_data = processed.metadata.get("image_data")
|
| 236 |
-
intent = self._classify_intent(processed.cleaned_content, has_image)
|
| 237 |
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
|
| 241 |
-
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
# Analyze Image
|
| 246 |
-
vision_res = await asyncio.to_thread(self.vision_analyzer.analyze, image_data)
|
| 247 |
-
|
| 248 |
-
if vision_res.get("math_detected"):
|
| 249 |
-
# Extracted math text -> Solve Symbolically
|
| 250 |
-
math_text = vision_res.get("latex", "") or vision_res.get("text", "")
|
| 251 |
-
logger.info(f"Vision detected math: {math_text}")
|
| 252 |
-
|
| 253 |
-
sym_res = await asyncio.to_thread(self.symbolic_solver.solve, math_text)
|
| 254 |
-
if sym_res.get("status") == "success":
|
| 255 |
-
self._populate_success(result_schema, sym_res, "vision+symbolic")
|
| 256 |
-
result_schema["steps"] = ["Analyzed image with YOLO/OCR", f"Extracted: {math_text}"] + sym_res.get("steps", [])
|
| 257 |
-
else:
|
| 258 |
-
# Fallback to Gemini with Image
|
| 259 |
-
gem_res = await self._safe_llm_call(processed.cleaned_content, image_data=image_data)
|
| 260 |
-
self._populate_success(result_schema, gem_res, "vision+gemini")
|
| 261 |
-
else:
|
| 262 |
-
# General Image -> Gemini
|
| 263 |
-
gem_res = await self._safe_llm_call(processed.cleaned_content, image_data=image_data)
|
| 264 |
-
self._populate_success(result_schema, gem_res, "gemini-vision")
|
| 265 |
-
|
| 266 |
-
# --- ROUTE: MATH (Symbolic/Arithmetic) ---
|
| 267 |
-
elif intent in [IntentType.SYMBOLIC_MATH, IntentType.ARITHMETIC]:
|
| 268 |
-
# Try Symbolic Solver First
|
| 269 |
-
normalized = self.math_normalizer.normalize(processed.cleaned_content)
|
| 270 |
-
target = normalized if normalized else processed.cleaned_content
|
| 271 |
-
|
| 272 |
-
sym_res = await asyncio.to_thread(self.symbolic_solver.solve, target)
|
| 273 |
-
|
| 274 |
-
if sym_res.get("status") == "success":
|
| 275 |
-
self._populate_success(result_schema, sym_res, "symbolic_solver")
|
| 276 |
-
else:
|
| 277 |
-
# Fallback to Gemini
|
| 278 |
-
logger.info("Symbolic solver failed, falling back to Gemini.")
|
| 279 |
-
|
| 280 |
-
fallback_prompt = processed.cleaned_content
|
| 281 |
-
if user_context_str:
|
| 282 |
-
fallback_prompt = f"{user_context_str}\n\nProblem: {processed.cleaned_content}"
|
| 283 |
-
|
| 284 |
-
gem_res = await self._safe_llm_call(fallback_prompt)
|
| 285 |
-
self._populate_success(result_schema, gem_res, "gemini-fallback")
|
| 286 |
-
|
| 287 |
-
# --- ROUTE: SEARCH ---
|
| 288 |
-
elif intent == IntentType.SEARCH:
|
| 289 |
-
# Scrape via Celery
|
| 290 |
-
try:
|
| 291 |
-
task = scrape_web_task.delay(processed.cleaned_content)
|
| 292 |
-
# Wait for result with timeout (blocking the request, but offloading CPU)
|
| 293 |
-
scrape_res = task.get(timeout=30)
|
| 294 |
-
except Exception as e:
|
| 295 |
-
logger.error(f"Celery scrape task failed: {e}")
|
| 296 |
-
scrape_res = {"content": "", "error": str(e)}
|
| 297 |
-
|
| 298 |
-
context = scrape_res.get("content", "")[:3000] # Limit context
|
| 299 |
-
|
| 300 |
-
# Summarize with Gemini
|
| 301 |
-
summary_prompt = f"Using this search data: {context}\n\nAnswer: {processed.cleaned_content}"
|
| 302 |
-
if user_context_str:
|
| 303 |
-
summary_prompt = f"{user_context_str}\n\n{summary_prompt}"
|
| 304 |
-
|
| 305 |
-
gem_res = await self._safe_llm_call(summary_prompt)
|
| 306 |
|
| 307 |
-
|
| 308 |
-
result_schema["
|
| 309 |
-
|
| 310 |
-
|
| 311 |
-
else:
|
| 312 |
-
# Direct Gemini Call
|
| 313 |
-
prompt = processed.cleaned_content
|
| 314 |
-
if user_context_str:
|
| 315 |
-
prompt = f"{user_context_str}\n\nProblem: {processed.cleaned_content}"
|
| 316 |
|
| 317 |
-
|
| 318 |
-
|
|
|
|
|
|
|
| 319 |
|
| 320 |
-
#
|
| 321 |
if result_schema["status"] == "success":
|
| 322 |
-
# Save to Redis Cache (CRITICAL for lock waiters!)
|
| 323 |
if settings.ENABLE_CACHE:
|
| 324 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 325 |
|
| 326 |
-
# Save to DB
|
| 327 |
self.db_manager.save_problem(
|
| 328 |
-
{"
|
| 329 |
result_schema
|
| 330 |
)
|
| 331 |
-
# Index
|
| 332 |
-
if self.similarity_finder and result_schema["answer"]:
|
| 333 |
-
self.similarity_finder.index_problem(
|
| 334 |
-
processed.cleaned_content,
|
| 335 |
-
str(result_schema["answer"]),
|
| 336 |
-
{"model": result_schema["metadata"]["model"]}
|
| 337 |
-
)
|
| 338 |
-
|
| 339 |
-
# Release lock if we acquired it
|
| 340 |
-
if lock_acquired and self.cache_manager.redis_client:
|
| 341 |
-
try:
|
| 342 |
-
self.cache_manager.redis_client.delete(lock_key)
|
| 343 |
-
except Exception as e:
|
| 344 |
-
logger.warning(f"Failed to release lock {lock_key}: {e}")
|
| 345 |
|
| 346 |
return self._finalize_result(result_schema, start_time)
|
| 347 |
|
| 348 |
except Exception as e:
|
| 349 |
-
logger.error(f"Orchestrator Error: {e}")
|
| 350 |
-
|
| 351 |
-
# Release lock on error too
|
| 352 |
-
if locals().get("lock_acquired") and self.cache_manager.redis_client:
|
| 353 |
-
try:
|
| 354 |
-
self.cache_manager.redis_client.delete(lock_key)
|
| 355 |
-
except Exception as release_err:
|
| 356 |
-
logger.warning(f"Failed to release lock {lock_key} in error handler: {release_err}")
|
| 357 |
-
|
| 358 |
result_schema["explanation"] = f"Internal Error: {str(e)}"
|
| 359 |
return self._finalize_result(result_schema, start_time)
|
| 360 |
|
| 361 |
-
async def _safe_llm_call(self, prompt: str, image_data: Optional[str] = None, **kwargs) -> Dict[str, Any]:
|
| 362 |
-
"""
|
| 363 |
-
Tries Gemini first. If 429/Resource Exhausted, falls back to local Qwen.
|
| 364 |
-
"""
|
| 365 |
-
try:
|
| 366 |
-
return await self.gemini.solve(prompt, image_data, **kwargs)
|
| 367 |
-
except Exception as e:
|
| 368 |
-
error_str = str(e)
|
| 369 |
-
if "429" in error_str or "RESOURCE_EXHAUSTED" in error_str:
|
| 370 |
-
logger.warning(f"Gemini Rate Limit (429) hit. Falling back to local Qwen. Error: {e}")
|
| 371 |
-
# Fallback to Qwen
|
| 372 |
-
# Qwen might not support images, so we strip it.
|
| 373 |
-
try:
|
| 374 |
-
return await self.qwen.solve(prompt, image_data=None)
|
| 375 |
-
except Exception as qwen_error:
|
| 376 |
-
logger.error(f"Fallback to Qwen also failed: {qwen_error}")
|
| 377 |
-
raise e # Raise original Gemini error if fallback fails to indicate overloaded state.
|
| 378 |
-
|
| 379 |
-
# If not a rate limit error, re-raise immediately
|
| 380 |
-
raise e
|
| 381 |
-
|
| 382 |
-
def _populate_success(self, schema: Dict, source_res: Dict, source_name: str):
|
| 383 |
-
"""Helper to map source result to unified schema."""
|
| 384 |
-
schema["status"] = "success"
|
| 385 |
-
schema["source"] = source_name
|
| 386 |
-
schema["answer"] = source_res.get("final_answer") or source_res.get("content") or source_res.get("text")
|
| 387 |
-
# Ensure LaTeX
|
| 388 |
-
schema["answer_latex"] = source_res.get("latex", schema["answer"]) # store latent for UI
|
| 389 |
-
schema["steps"] = source_res.get("steps", [])
|
| 390 |
-
if "reasoning" in source_res:
|
| 391 |
-
schema["explanation"] = source_res["reasoning"]
|
| 392 |
-
schema["confidence"] = source_res.get("confidence_score", 1.0)
|
| 393 |
-
schema["metadata"]["model"] = source_res.get("model", "unknown")
|
| 394 |
-
|
| 395 |
def _finalize_result(self, schema: Dict, start_time: float) -> Dict:
|
| 396 |
"""Calculates latency and returns final dict."""
|
| 397 |
schema["metadata"]["latency_ms"] = int((time.time() - start_time) * 1000)
|
|
|
|
| 1 |
+
|
| 2 |
import logging
|
| 3 |
import time
|
| 4 |
+
from typing import Any, Dict, Optional
|
|
|
|
|
|
|
|
|
|
| 5 |
|
| 6 |
from app.core.input_processor import InputProcessor
|
| 7 |
from app.memory.cache import CacheManager
|
| 8 |
from app.memory.database import DatabaseManager
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
from app.agents.adk_mathminds import MathMindsADKAgent
|
| 10 |
+
from app.core.settings import settings
|
| 11 |
|
| 12 |
logger = logging.getLogger(__name__)
|
| 13 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
class Orchestrator:
|
| 15 |
"""
|
| 16 |
+
Simplified Orchestrator for MathMinds AI (Pure ADK Architecture).
|
| 17 |
+
Delegates all reasoning and tool usage to the MathMindsADKAgent.
|
| 18 |
"""
|
| 19 |
|
| 20 |
def __init__(self, cache_manager: Optional[CacheManager] = None, db_manager: Optional[DatabaseManager] = None):
|
|
|
|
| 23 |
self.cache_manager = cache_manager or CacheManager()
|
| 24 |
self.db_manager = db_manager or DatabaseManager()
|
| 25 |
|
| 26 |
+
# The Single Source of Truth
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
self.adk_agent = MathMindsADKAgent()
|
| 28 |
|
| 29 |
except Exception as e:
|
| 30 |
logger.critical(f"Failed to initialize Orchestrator: {e}")
|
| 31 |
raise
|
| 32 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
async def process_problem(self, text: Optional[str] = None, image: Optional[str] = None, request_id: Optional[str] = None, model_preference: str = "fast", session_id: Optional[str] = None, user_id: Optional[str] = None) -> Dict[str, Any]:
|
| 34 |
"""
|
| 35 |
+
Streamlined Pipeline: Input -> Agent -> Output.
|
| 36 |
"""
|
| 37 |
start_time = time.time()
|
| 38 |
request_id = request_id or "unknown"
|
| 39 |
|
| 40 |
+
# Default Schema
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 41 |
result_schema = {
|
| 42 |
"request_id": request_id,
|
| 43 |
"status": "error",
|
| 44 |
+
"source": "google_adk_agent",
|
|
|
|
| 45 |
"answer": None,
|
| 46 |
"steps": [],
|
| 47 |
"explanation": None,
|
|
|
|
| 49 |
"cached": False,
|
| 50 |
"metadata": {
|
| 51 |
"latency_ms": 0,
|
| 52 |
+
"model": "gemini-flash-adk",
|
| 53 |
"tools_used": []
|
| 54 |
}
|
| 55 |
}
|
|
|
|
| 61 |
result_schema["explanation"] = processed.error_message
|
| 62 |
return self._finalize_result(result_schema, start_time)
|
| 63 |
|
| 64 |
+
# 2. Agent Execution
|
| 65 |
+
logger.info("Routing request to ADK Agent")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 66 |
|
| 67 |
+
# Pass image data if available
|
| 68 |
+
image_data_b64 = processed.metadata.get("image_data")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 69 |
|
| 70 |
+
try:
|
| 71 |
+
agent_response = await self.adk_agent.solve(
|
| 72 |
+
problem=processed.cleaned_content,
|
| 73 |
+
image_data=image_data_b64,
|
| 74 |
+
session_id=session_id or "default_session",
|
| 75 |
+
user_id=user_id or "default_user"
|
| 76 |
+
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 77 |
|
| 78 |
+
result_schema["status"] = "success"
|
| 79 |
+
result_schema["answer"] = agent_response
|
| 80 |
+
result_schema["explanation"] = "Processed by MathMinds ADK Agent."
|
| 81 |
+
result_schema["confidence"] = 1.0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 82 |
|
| 83 |
+
except Exception as e:
|
| 84 |
+
logger.error(f"ADK Agent execution failed: {e}")
|
| 85 |
+
result_schema["explanation"] = f"Agent Error: {str(e)}"
|
| 86 |
+
return self._finalize_result(result_schema, start_time)
|
| 87 |
|
| 88 |
+
# 3. Persistence (Cache & DB)
|
| 89 |
if result_schema["status"] == "success":
|
|
|
|
| 90 |
if settings.ENABLE_CACHE:
|
| 91 |
+
# Simple hash for caching (content + image)
|
| 92 |
+
# Note: In a real agent scenario, caching entire conversations is complex.
|
| 93 |
+
# We skip aggressive caching for now to rely on Agent's session memory,
|
| 94 |
+
# or we cache only exact single-turn queries if needed.
|
| 95 |
+
pass
|
| 96 |
|
| 97 |
+
# Save to DB for history
|
| 98 |
self.db_manager.save_problem(
|
| 99 |
+
{"content": processed.cleaned_content},
|
| 100 |
result_schema
|
| 101 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 102 |
|
| 103 |
return self._finalize_result(result_schema, start_time)
|
| 104 |
|
| 105 |
except Exception as e:
|
| 106 |
+
logger.error(f"Orchestrator Critical Error: {e}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 107 |
result_schema["explanation"] = f"Internal Error: {str(e)}"
|
| 108 |
return self._finalize_result(result_schema, start_time)
|
| 109 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 110 |
def _finalize_result(self, schema: Dict, start_time: float) -> Dict:
|
| 111 |
"""Calculates latency and returns final dict."""
|
| 112 |
schema["metadata"]["latency_ms"] = int((time.time() - start_time) * 1000)
|
app/models/gemini.py
DELETED
|
@@ -1,230 +0,0 @@
|
|
| 1 |
-
import json
|
| 2 |
-
import logging
|
| 3 |
-
import re
|
| 4 |
-
import base64
|
| 5 |
-
import asyncio
|
| 6 |
-
from typing import Any, Dict, Optional
|
| 7 |
-
|
| 8 |
-
from google import genai
|
| 9 |
-
from google.genai import types
|
| 10 |
-
import pybreaker
|
| 11 |
-
from tenacity import retry, stop_after_attempt, wait_random_exponential, retry_if_exception_type
|
| 12 |
-
|
| 13 |
-
from app.core.settings import settings
|
| 14 |
-
from app.models.base import BaseModel
|
| 15 |
-
|
| 16 |
-
# Configure logging
|
| 17 |
-
logger = logging.getLogger(__name__)
|
| 18 |
-
|
| 19 |
-
# Global Semaphore to prevent rate limit exhaustion
|
| 20 |
-
_GEMINI_LOCK = asyncio.Semaphore(1)
|
| 21 |
-
|
| 22 |
-
class GeminiModel(BaseModel):
|
| 23 |
-
"""
|
| 24 |
-
Wrapper for the Gemini API using the new google-genai SDK (v1.0+).
|
| 25 |
-
Enforces structured output, cleans unicode, and handles retries/timeouts.
|
| 26 |
-
"""
|
| 27 |
-
|
| 28 |
-
def __init__(self, api_key: Optional[str] = None, model_name: str = "gemini-2.5-flash"):
|
| 29 |
-
"""
|
| 30 |
-
Initialize the GeminiModel.
|
| 31 |
-
|
| 32 |
-
Args:
|
| 33 |
-
api_key: Gemini API key. Defaults to settings.GOOGLE_API_KEY.
|
| 34 |
-
model_name: Model to use. Defaults to gemini-flash-latest.
|
| 35 |
-
"""
|
| 36 |
-
self.api_key = api_key or settings.GOOGLE_API_KEY
|
| 37 |
-
if not self.api_key:
|
| 38 |
-
logger.warning("No API key provided for GeminiModel. Calls will fail.")
|
| 39 |
-
|
| 40 |
-
self.client = genai.Client(api_key=self.api_key)
|
| 41 |
-
self.model_name = model_name
|
| 42 |
-
|
| 43 |
-
# Initialize Circuit Breaker
|
| 44 |
-
# Trips after 5 consecutive failures, resets after 60 seconds
|
| 45 |
-
self.breaker = pybreaker.CircuitBreaker(
|
| 46 |
-
fail_max=5,
|
| 47 |
-
reset_timeout=60,
|
| 48 |
-
listeners=[pybreaker.CircuitBreakerListener()] # Optional: add listeners for logging
|
| 49 |
-
)
|
| 50 |
-
|
| 51 |
-
def _clean_text(self, text: str) -> str:
|
| 52 |
-
"""
|
| 53 |
-
Sanitizes text by removing spaced letters and normalizing unicode math to ASCII/LaTeX.
|
| 54 |
-
"""
|
| 55 |
-
if not isinstance(text, str):
|
| 56 |
-
return text
|
| 57 |
-
|
| 58 |
-
text = re.sub(r'(?<=\b\w) (?=\w\b)', '', text)
|
| 59 |
-
|
| 60 |
-
replacements = {
|
| 61 |
-
"−": "-", "∞": "\\infty", "𝑓": "f", "𝑠": "s", "𝑡": "t",
|
| 62 |
-
"𝐿": "L", "𝐹": "F", "𝑒": "e", "∫": "\\int", "∂": "\\partial",
|
| 63 |
-
"∑": "\\sum", "∏": "\\prod", "√": "\\sqrt",
|
| 64 |
-
}
|
| 65 |
-
for k, v in replacements.items():
|
| 66 |
-
text = text.replace(k, v)
|
| 67 |
-
return text
|
| 68 |
-
|
| 69 |
-
def _safe_parse_json(self, text: str) -> Optional[Dict[str, Any]]:
|
| 70 |
-
try:
|
| 71 |
-
return json.loads(text)
|
| 72 |
-
except json.JSONDecodeError:
|
| 73 |
-
match = re.search(r"\{.*\}", text, re.DOTALL)
|
| 74 |
-
if match:
|
| 75 |
-
try:
|
| 76 |
-
return json.loads(match.group())
|
| 77 |
-
except json.JSONDecodeError:
|
| 78 |
-
pass
|
| 79 |
-
return None
|
| 80 |
-
|
| 81 |
-
# Removed @retry to prevent 429 stampedes
|
| 82 |
-
async def solve(self, prompt: str, image_data: Optional[str] = None, **kwargs) -> Dict[str, Any]:
|
| 83 |
-
"""
|
| 84 |
-
Solves a math problem using Gemini with timeout protection and concurrency control.
|
| 85 |
-
"""
|
| 86 |
-
model_name = kwargs.get("model_name")
|
| 87 |
-
|
| 88 |
-
# Global Concurrency Lock (One request at a time)
|
| 89 |
-
async with _GEMINI_LOCK:
|
| 90 |
-
return await self.breaker.call(
|
| 91 |
-
self._solve_with_timeout,
|
| 92 |
-
prompt,
|
| 93 |
-
image_data,
|
| 94 |
-
model_name,
|
| 95 |
-
timeout=60
|
| 96 |
-
)
|
| 97 |
-
|
| 98 |
-
async def _solve_with_timeout(self, prompt: str, image_data: Optional[str] = None, model_name: Optional[str] = None, timeout: int = 60) -> Dict[str, Any]:
|
| 99 |
-
try:
|
| 100 |
-
result = await asyncio.wait_for(
|
| 101 |
-
asyncio.to_thread(self._solve_internal, prompt, image_data, model_name),
|
| 102 |
-
timeout=timeout
|
| 103 |
-
)
|
| 104 |
-
return result
|
| 105 |
-
except asyncio.TimeoutError:
|
| 106 |
-
raise TimeoutError(f"Gemini API did not respond within {timeout}s")
|
| 107 |
-
|
| 108 |
-
def _solve_internal(self, prompt: str, image_data: Optional[str] = None, model_name: Optional[str] = None) -> Dict[str, Any]:
|
| 109 |
-
if not prompt and not image_data:
|
| 110 |
-
raise ValueError("Input cannot be empty.")
|
| 111 |
-
|
| 112 |
-
target_model = model_name or self.model_name
|
| 113 |
-
|
| 114 |
-
# We wrap the user prompt in our system instruction here
|
| 115 |
-
full_prompt = f"""
|
| 116 |
-
You are a high-efficiency multimodal math solver. Output strictly valid JSON.
|
| 117 |
-
|
| 118 |
-
Strategy: "Think Aloud -> Extract -> Solve -> Verify -> Box Answer"
|
| 119 |
-
|
| 120 |
-
Format:
|
| 121 |
-
{{
|
| 122 |
-
"latex": "The exact problem statement in LaTeX",
|
| 123 |
-
"reasoning": "Step-by-step logical derivation. Use standard sentences. Wrap formulas in $...$. Explain identifying the problem type (e.g. Geometry, Algebra).",
|
| 124 |
-
"final_answer": "The bare result (boxed in \\boxed{{...}})",
|
| 125 |
-
"confidence_score": 0.0-1.0
|
| 126 |
-
}}
|
| 127 |
-
|
| 128 |
-
STRICT FORMATTING RULES:
|
| 129 |
-
- Use ASCII characters only.
|
| 130 |
-
- Use LaTeX for formulas.
|
| 131 |
-
- Wrap all formulas in $...$ or $$...$$
|
| 132 |
-
- Do NOT use unicode math symbols.
|
| 133 |
-
- Do NOT insert unnecessary spaces within words.
|
| 134 |
-
- Do NOT insert newlines inside formulas.
|
| 135 |
-
|
| 136 |
-
Problem Context:
|
| 137 |
-
{prompt}
|
| 138 |
-
"""
|
| 139 |
-
|
| 140 |
-
contents = [full_prompt]
|
| 141 |
-
|
| 142 |
-
if image_data:
|
| 143 |
-
try:
|
| 144 |
-
image_bytes = base64.b64decode(image_data)
|
| 145 |
-
image_part = types.Part.from_bytes(data=image_bytes, mime_type="image/png")
|
| 146 |
-
contents.append(image_part)
|
| 147 |
-
except Exception as e:
|
| 148 |
-
logger.warning(f"Failed to process image attachment: {e}")
|
| 149 |
-
contents[0] += "\n[Error: Image attachment failed to load, rely on text]"
|
| 150 |
-
|
| 151 |
-
try:
|
| 152 |
-
response = self.client.models.generate_content(
|
| 153 |
-
model=target_model,
|
| 154 |
-
contents=contents,
|
| 155 |
-
config=types.GenerateContentConfig(
|
| 156 |
-
response_mime_type="application/json",
|
| 157 |
-
temperature=0.2
|
| 158 |
-
)
|
| 159 |
-
)
|
| 160 |
-
|
| 161 |
-
raw_text = response.text
|
| 162 |
-
if not raw_text:
|
| 163 |
-
raise ValueError("Empty response from Gemini API")
|
| 164 |
-
|
| 165 |
-
result = self._safe_parse_json(raw_text)
|
| 166 |
-
|
| 167 |
-
if result is None:
|
| 168 |
-
raise ValueError(f"Failed to parse JSON from response: {raw_text[:200]}...")
|
| 169 |
-
|
| 170 |
-
for k in result:
|
| 171 |
-
if isinstance(result[k], str):
|
| 172 |
-
result[k] = self._clean_text(result[k])
|
| 173 |
-
|
| 174 |
-
required_keys = ["latex", "reasoning", "final_answer", "confidence_score"]
|
| 175 |
-
for key in required_keys:
|
| 176 |
-
if key not in result:
|
| 177 |
-
logger.warning(f"Missing key {key} in Gemini response: {result.keys()}")
|
| 178 |
-
if key == "confidence_score":
|
| 179 |
-
result[key] = 0.0
|
| 180 |
-
else:
|
| 181 |
-
result[key] = "Error: Missing in response"
|
| 182 |
-
|
| 183 |
-
# Inject model info
|
| 184 |
-
result["model"] = "gemini"
|
| 185 |
-
return result
|
| 186 |
-
|
| 187 |
-
except Exception as e:
|
| 188 |
-
logger.error(f"Gemini API call failed: {e}")
|
| 189 |
-
raise
|
| 190 |
-
async def generate_with_tools(
|
| 191 |
-
self,
|
| 192 |
-
prompt: str,
|
| 193 |
-
tools: Optional[list] = None,
|
| 194 |
-
history: Optional[list] = None,
|
| 195 |
-
tool_config: Optional[Dict] = None
|
| 196 |
-
) -> Any:
|
| 197 |
-
"""
|
| 198 |
-
Generates content using Gemini with tool support (Function Calling).
|
| 199 |
-
Returns the raw response object to let Orchestrator handle tool calls.
|
| 200 |
-
"""
|
| 201 |
-
target_model = "gemini-2.5-flash" # Tools work well with Flash and it is generally available
|
| 202 |
-
|
| 203 |
-
try:
|
| 204 |
-
# Convert history to SDK format if needed, or rely on client.chats.create
|
| 205 |
-
# For now, we'll use a stateless generate_content with history in contents if simple,
|
| 206 |
-
# or better: use client.chats for multi-turn.
|
| 207 |
-
|
| 208 |
-
# Let's use the efficient generate_content with a list of messages
|
| 209 |
-
contents = []
|
| 210 |
-
|
| 211 |
-
if history:
|
| 212 |
-
contents.extend(history)
|
| 213 |
-
|
| 214 |
-
contents.append(types.Content(role="user", parts=[types.Part.from_text(text=prompt)]))
|
| 215 |
-
|
| 216 |
-
response = await asyncio.to_thread(
|
| 217 |
-
self.client.models.generate_content,
|
| 218 |
-
model=target_model,
|
| 219 |
-
contents=contents,
|
| 220 |
-
config=types.GenerateContentConfig(
|
| 221 |
-
tools=tools,
|
| 222 |
-
tool_config=tool_config,
|
| 223 |
-
temperature=0.0 # Strict for tools
|
| 224 |
-
)
|
| 225 |
-
)
|
| 226 |
-
return response
|
| 227 |
-
|
| 228 |
-
except Exception as e:
|
| 229 |
-
logger.error(f"Gemini Tool Generation failed: {e}")
|
| 230 |
-
raise
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
app/models/qwen.py
DELETED
|
@@ -1,92 +0,0 @@
|
|
| 1 |
-
import logging
|
| 2 |
-
import json
|
| 3 |
-
import re
|
| 4 |
-
import asyncio
|
| 5 |
-
from typing import Any, Dict, Optional
|
| 6 |
-
import ollama
|
| 7 |
-
from app.models.base import BaseModel
|
| 8 |
-
|
| 9 |
-
logger = logging.getLogger(__name__)
|
| 10 |
-
|
| 11 |
-
class QwenModel(BaseModel):
|
| 12 |
-
"""
|
| 13 |
-
Wrapper for a local Qwen model via Ollama.
|
| 14 |
-
optimized for simple text-based internal reasoning/math.
|
| 15 |
-
"""
|
| 16 |
-
|
| 17 |
-
def __init__(self, model_name: str = "qwen2.5:3b"):
|
| 18 |
-
"""
|
| 19 |
-
Args:
|
| 20 |
-
model_name: The name of the model in Ollama (e.g., 'qwen2.5:7b', 'qwen2.5-math').
|
| 21 |
-
"""
|
| 22 |
-
self.model_name = model_name
|
| 23 |
-
|
| 24 |
-
async def solve(self, prompt: str, image_data: Optional[str] = None, **kwargs) -> Dict[str, Any]:
|
| 25 |
-
"""
|
| 26 |
-
Solves using local Qwen.
|
| 27 |
-
Note: Qwen 2.5 Math is text-only usually, unless using VL version.
|
| 28 |
-
We will reject images for now as per plan.
|
| 29 |
-
"""
|
| 30 |
-
if image_data:
|
| 31 |
-
logger.warning("QwenModel received image data, but ignoring it (text-only fallback).")
|
| 32 |
-
|
| 33 |
-
full_prompt = f"""
|
| 34 |
-
You are a helpful math assistant. Solve this problem carefully.
|
| 35 |
-
Return ONLY valid JSON.
|
| 36 |
-
|
| 37 |
-
Strategy: "Think Aloud -> Extract -> Solve -> Verify -> Box Answer"
|
| 38 |
-
|
| 39 |
-
Format:
|
| 40 |
-
{{
|
| 41 |
-
"latex": "The exact problem statement in LaTeX",
|
| 42 |
-
"reasoning": "Step-by-step logical derivation. Use standard sentences. Wrap formulas in $...$.",
|
| 43 |
-
"final_answer": "The bare result (boxed in \\boxed{{...}})",
|
| 44 |
-
"confidence_score": 0.0-1.0
|
| 45 |
-
}}
|
| 46 |
-
|
| 47 |
-
STRICT FORMATTING RULES:
|
| 48 |
-
- Use ASCII characters only.
|
| 49 |
-
- Use LaTeX for formulas.
|
| 50 |
-
- Wrap all formulas in $...$ or $$...$$
|
| 51 |
-
- Do NOT use unicode math symbols.
|
| 52 |
-
|
| 53 |
-
Problem: {prompt}
|
| 54 |
-
"""
|
| 55 |
-
|
| 56 |
-
try:
|
| 57 |
-
# Run blocking inference in a thread
|
| 58 |
-
response = await asyncio.to_thread(
|
| 59 |
-
ollama.chat,
|
| 60 |
-
model=self.model_name,
|
| 61 |
-
messages=[{'role': 'user', 'content': full_prompt}],
|
| 62 |
-
format='json'
|
| 63 |
-
)
|
| 64 |
-
|
| 65 |
-
content = response['message']['content']
|
| 66 |
-
|
| 67 |
-
# Parse JSON
|
| 68 |
-
try:
|
| 69 |
-
result = json.loads(content)
|
| 70 |
-
except json.JSONDecodeError:
|
| 71 |
-
logger.error(f"Failed to parse Qwen JSON: {content}")
|
| 72 |
-
# Fallback manual extraction could go here
|
| 73 |
-
return {
|
| 74 |
-
"answer": content,
|
| 75 |
-
"confidence": 0.5,
|
| 76 |
-
"model": "qwen",
|
| 77 |
-
"error": "json_parse_error"
|
| 78 |
-
}
|
| 79 |
-
|
| 80 |
-
# Normalize keys to match system expectation
|
| 81 |
-
if "answer" not in result and "final_answer" in result:
|
| 82 |
-
result["answer"] = result["final_answer"]
|
| 83 |
-
|
| 84 |
-
if "confidence" not in result:
|
| 85 |
-
result["confidence"] = result.get("confidence_score", 0.6)
|
| 86 |
-
|
| 87 |
-
result["model"] = "qwen"
|
| 88 |
-
return result
|
| 89 |
-
|
| 90 |
-
except Exception as e:
|
| 91 |
-
logger.error(f"Qwen/Ollama inference failed: {e}")
|
| 92 |
-
raise e
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|