Commit 13861c2
Parent: 140713c
Upd user_id

Files changed:
- legacy.py +1 -1
- routes/reports.py +4 -3
- utils/api/router.py +3 -3
legacy.py
CHANGED

@@ -791,7 +791,7 @@ Return only the variations, one per line, no numbering or extra text."""
 
     from utils.api.router import generate_answer_with_model
     selection = {"provider": "nvidia", "model": "meta/llama-3.1-8b-instruct"}
-    response = await generate_answer_with_model(selection, sys_prompt, user_prompt, None, nvidia_rotator)
+    response = await generate_answer_with_model(selection, sys_prompt, user_prompt, None, nvidia_rotator, user_id="system", context="legacy_analysis")
 
     # Parse variations
     variations = [line.strip() for line in response.split('\n') if line.strip()]
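The only change here is the call site: the legacy helper now identifies itself to the router with user_id="system" and context="legacy_analysis". The extended signature of generate_answer_with_model can be inferred from the recursive calls in utils/api/router.py below; the sketch that follows is that inference, and the default values are assumptions not shown in this commit.

```python
# Inferred sketch of the extended router entry point. The parameter order
# follows the recursive calls in utils/api/router.py; defaults are assumed.
from typing import Any, Dict, Optional

async def generate_answer_with_model(
    selection: Dict[str, Any],
    system_prompt: str,
    user_prompt: str,
    gemini_rotator: Optional[Any],
    nvidia_rotator: Optional[Any],
    user_id: str = "",   # assumed default; legacy.py passes "system"
    context: str = "",   # assumed default; legacy.py passes "legacy_analysis"
) -> str:
    ...
```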
routes/reports.py
CHANGED

@@ -155,7 +155,7 @@ async def generate_report(
     # Step 2: Execute detailed subtasks based on CoT plan
     logger.info("[REPORT] Executing detailed subtasks")
     update_report_status(session_id, "processing", "Processing data...", 40)
-    detailed_analysis = await execute_detailed_subtasks(cot_plan, context_text, web_context_block, eff_name, nvidia_rotator, gemini_rotator)
+    detailed_analysis = await execute_detailed_subtasks(cot_plan, context_text, web_context_block, eff_name, nvidia_rotator, gemini_rotator, user_id)
 
     # Step 3: Synthesize comprehensive report from detailed analysis
     logger.info("[REPORT] Synthesizing comprehensive report")

@@ -453,7 +453,7 @@ Create a simple plan for this report."""
     }
 
 
-async def execute_detailed_subtasks(cot_plan: Dict[str, Any], context_text: str, web_context: str, filename: str, nvidia_rotator, gemini_rotator) -> Dict[str, Any]:
+async def execute_detailed_subtasks(cot_plan: Dict[str, Any], context_text: str, web_context: str, filename: str, nvidia_rotator, gemini_rotator, user_id: str = "") -> Dict[str, Any]:
     """Execute detailed analysis for each subtask with hierarchical section assignment and CoT references."""
     detailed_analysis = {}
     synthesis_strategy = cot_plan.get("synthesis_strategy", {})

@@ -1326,7 +1326,8 @@ Return the renumbered headings in the format: "level: new_number: heading_text"
 @app.post("/mermaid/fix")
 async def fix_mermaid_syntax(
     mermaid_code: str = Form(...),
-    error_message: str = Form("")
+    error_message: str = Form(""),
+    user_id: str = Form("system")
 ):
     """
     Fix Mermaid diagram syntax using AI for UI rendering.
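Callers of the /mermaid/fix endpoint can now attribute requests by posting the extra form field; if omitted, the server falls back to "system". A minimal client sketch, assuming the app is served locally on port 8000 (the base URL is an assumption):

```python
# Minimal form-encoded request against the updated endpoint. The base URL is
# an assumption; the field names mirror the Form(...) parameters above.
import requests

resp = requests.post(
    "http://localhost:8000/mermaid/fix",  # assumed local dev URL
    data={
        "mermaid_code": "graph TD; A-->B",
        "error_message": "",
        "user_id": "demo-user",  # new field; server defaults to "system"
    },
)
print(resp.status_code, resp.text)
```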
utils/api/router.py
CHANGED

@@ -190,7 +190,7 @@ async def generate_answer_with_model(selection: Dict[str, Any], system_prompt: s
             # Fallback: Qwen → NVIDIA_SMALL
             logger.info("Falling back from Qwen to NVIDIA_SMALL")
             fallback_selection = {"provider": "nvidia", "model": NVIDIA_SMALL}
-            return await generate_answer_with_model(fallback_selection, system_prompt, user_prompt, gemini_rotator, nvidia_rotator)
+            return await generate_answer_with_model(fallback_selection, system_prompt, user_prompt, gemini_rotator, nvidia_rotator, user_id, context)
     elif provider == "nvidia_large":
         # Use NVIDIA Large (GPT-OSS) for hard/long context tasks with fallback
         try:

@@ -200,7 +200,7 @@ async def generate_answer_with_model(selection: Dict[str, Any], system_prompt: s
             # Fallback: NVIDIA_LARGE → NVIDIA_SMALL
             logger.info("Falling back from NVIDIA_LARGE to NVIDIA_SMALL")
             fallback_selection = {"provider": "nvidia", "model": NVIDIA_SMALL}
-            return await generate_answer_with_model(fallback_selection, system_prompt, user_prompt, gemini_rotator, nvidia_rotator)
+            return await generate_answer_with_model(fallback_selection, system_prompt, user_prompt, gemini_rotator, nvidia_rotator, user_id, context)
     elif provider == "nvidia_coder":
         # Use NVIDIA Coder for code generation tasks with fallback
         try:

@@ -211,7 +211,7 @@ async def generate_answer_with_model(selection: Dict[str, Any], system_prompt: s
             # Fallback: NVIDIA_CODER → NVIDIA_SMALL
             logger.info("Falling back from NVIDIA_CODER to NVIDIA_SMALL")
             fallback_selection = {"provider": "nvidia", "model": NVIDIA_SMALL}
-            return await generate_answer_with_model(fallback_selection, system_prompt, user_prompt, gemini_rotator, nvidia_rotator)
+            return await generate_answer_with_model(fallback_selection, system_prompt, user_prompt, gemini_rotator, nvidia_rotator, user_id, context)
 
     return "Unsupported provider."
 
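All three hunks are the same one-line fix: each fallback branch re-enters generate_answer_with_model recursively, so any argument not forwarded explicitly silently resets to its default, and before this commit the retried request lost its user_id and context. A stripped-down sketch of the pattern, with _call_provider standing in for the module's real provider calls (illustrative only):

```python
# Illustration of the fallback pattern fixed above; _call_provider is a
# hypothetical stand-in for the module's real NVIDIA/Gemini calls.
NVIDIA_SMALL = "meta/llama-3.1-8b-instruct"

async def generate_answer(selection: dict, user_id: str = "", context: str = "") -> str:
    try:
        return await _call_provider(selection, user_id=user_id, context=context)
    except Exception:
        if selection.get("model") == NVIDIA_SMALL:
            raise  # already on the smallest model; nothing left to try
        fallback = {"provider": "nvidia", "model": NVIDIA_SMALL}
        # Forwarding user_id and context here is the whole fix: without them,
        # the retried call would run without attribution.
        return await generate_answer(fallback, user_id, context)

async def _call_provider(selection: dict, user_id: str = "", context: str = "") -> str:
    # Placeholder body, shown only for shape.
    return f"[{selection['model']}] answer for {user_id or 'anonymous'}"
```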