Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
Commit
·
d9dfa71
1
Parent(s):
0411044
lefg
Browse files- app.py +7 -1
- src/agents/deep_agents.py +18 -0
app.py
CHANGED
|
@@ -2229,9 +2229,11 @@ async def _generate_deep_analysis_stream(session_state: dict, goal: str, session
|
|
| 2229 |
desc = session_state['description']
|
| 2230 |
|
| 2231 |
# Generate dataset info for all datasets
|
| 2232 |
-
|
| 2233 |
|
| 2234 |
dataset_info = desc
|
|
|
|
|
|
|
| 2235 |
|
| 2236 |
|
| 2237 |
|
|
@@ -2435,6 +2437,10 @@ async def _generate_deep_analysis_stream(session_state: dict, goal: str, session
|
|
| 2435 |
# Use the new streaming method and forward all progress updates
|
| 2436 |
final_result = None
|
| 2437 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2438 |
async for update in deep_analyzer.execute_deep_analysis_streaming(
|
| 2439 |
goal=goal,
|
| 2440 |
dataset_info=dataset_info,
|
|
|
|
| 2229 |
desc = session_state['description']
|
| 2230 |
|
| 2231 |
# Generate dataset info for all datasets
|
| 2232 |
+
logger.log_message(f"π DEEP ANALYSIS START - datasets type: {type(datasets)}, keys: {list(datasets.keys()) if datasets else 'None'}", level=logging.DEBUG)
|
| 2233 |
|
| 2234 |
dataset_info = desc
|
| 2235 |
+
logger.log_message(f"π DEEP ANALYSIS - dataset_info type: {type(dataset_info)}, length: {len(dataset_info) if isinstance(dataset_info, str) else 'N/A'}", level=logging.DEBUG)
|
| 2236 |
+
logger.log_message(f"π DEEP ANALYSIS - dataset_info content: {dataset_info[:200]}...", level=logging.DEBUG)
|
| 2237 |
|
| 2238 |
|
| 2239 |
|
|
|
|
| 2437 |
# Use the new streaming method and forward all progress updates
|
| 2438 |
final_result = None
|
| 2439 |
|
| 2440 |
+
logger.log_message(f"π CALLING DEEP ANALYSIS - goal: {goal[:100]}...", level=logging.DEBUG)
|
| 2441 |
+
logger.log_message(f"π CALLING DEEP ANALYSIS - dataset_info type: {type(dataset_info)}, length: {len(dataset_info) if isinstance(dataset_info, str) else 'N/A'}", level=logging.DEBUG)
|
| 2442 |
+
logger.log_message(f"π CALLING DEEP ANALYSIS - session_datasets type: {type(datasets)}, keys: {list(datasets.keys()) if datasets else 'None'}", level=logging.DEBUG)
|
| 2443 |
+
|
| 2444 |
async for update in deep_analyzer.execute_deep_analysis_streaming(
|
| 2445 |
goal=goal,
|
| 2446 |
dataset_info=dataset_info,
|
src/agents/deep_agents.py
CHANGED
|
@@ -771,10 +771,15 @@ class deep_analysis_module(dspy.Module):
|
|
| 771 |
Execute deep analysis with streaming progress updates.
|
| 772 |
This is an async generator that yields progress updates incrementally.
|
| 773 |
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
| 774 |
# Make all session datasets available globally for code execution
|
| 775 |
if session_datasets is not None:
|
| 776 |
for dataset_name, dataset_df in session_datasets.items():
|
| 777 |
globals()[dataset_name] = dataset_df
|
|
|
|
| 778 |
|
| 779 |
try:
|
| 780 |
# Step 1: Generate deep questions (20% progress)
|
|
@@ -785,6 +790,7 @@ class deep_analysis_module(dspy.Module):
|
|
| 785 |
"progress": 10
|
| 786 |
}
|
| 787 |
|
|
|
|
| 788 |
questions = await self.deep_questions(goal=goal, dataset_info=dataset_info)
|
| 789 |
logger.log_message("Questions generated")
|
| 790 |
|
|
@@ -804,6 +810,7 @@ class deep_analysis_module(dspy.Module):
|
|
| 804 |
}
|
| 805 |
|
| 806 |
question_list = [q.strip() for q in questions.deep_questions.split('\n') if q.strip()]
|
|
|
|
| 807 |
deep_plan = await self.deep_planner(
|
| 808 |
deep_questions=questions.deep_questions,
|
| 809 |
dataset=dataset_info,
|
|
@@ -850,6 +857,7 @@ class deep_analysis_module(dspy.Module):
|
|
| 850 |
"progress": 45
|
| 851 |
}
|
| 852 |
|
|
|
|
| 853 |
queries = [
|
| 854 |
dspy.Example(
|
| 855 |
goal=questions.deep_questions,
|
|
@@ -864,7 +872,17 @@ class deep_analysis_module(dspy.Module):
|
|
| 864 |
)
|
| 865 |
for key in keys
|
| 866 |
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 867 |
tasks = [self.agents[key](**q) for q, key in zip(queries, keys)]
|
|
|
|
|
|
|
|
|
|
|
|
|
| 868 |
|
| 869 |
# Await all tasks to complete
|
| 870 |
summaries = []
|
|
|
|
| 771 |
Execute deep analysis with streaming progress updates.
|
| 772 |
This is an async generator that yields progress updates incrementally.
|
| 773 |
"""
|
| 774 |
+
logger.log_message(f"π DEEP ANALYSIS STREAMING START - goal: {goal[:100]}...", level=logging.DEBUG)
|
| 775 |
+
logger.log_message(f"π DEEP ANALYSIS STREAMING START - dataset_info type: {type(dataset_info)}, length: {len(dataset_info) if isinstance(dataset_info, str) else 'N/A'}", level=logging.DEBUG)
|
| 776 |
+
logger.log_message(f"π DEEP ANALYSIS STREAMING START - session_datasets type: {type(session_datasets)}, keys: {list(session_datasets.keys()) if session_datasets else 'None'}", level=logging.DEBUG)
|
| 777 |
+
|
| 778 |
# Make all session datasets available globally for code execution
|
| 779 |
if session_datasets is not None:
|
| 780 |
for dataset_name, dataset_df in session_datasets.items():
|
| 781 |
globals()[dataset_name] = dataset_df
|
| 782 |
+
logger.log_message(f"π MADE DATASET AVAILABLE GLOBALLY - {dataset_name}: shape {dataset_df.shape}", level=logging.DEBUG)
|
| 783 |
|
| 784 |
try:
|
| 785 |
# Step 1: Generate deep questions (20% progress)
|
|
|
|
| 790 |
"progress": 10
|
| 791 |
}
|
| 792 |
|
| 793 |
+
logger.log_message(f"π CALLING DEEP_QUESTIONS - dataset_info type: {type(dataset_info)}, length: {len(dataset_info) if isinstance(dataset_info, str) else 'N/A'}", level=logging.DEBUG)
|
| 794 |
questions = await self.deep_questions(goal=goal, dataset_info=dataset_info)
|
| 795 |
logger.log_message("Questions generated")
|
| 796 |
|
|
|
|
| 810 |
}
|
| 811 |
|
| 812 |
question_list = [q.strip() for q in questions.deep_questions.split('\n') if q.strip()]
|
| 813 |
+
logger.log_message(f"π CALLING DEEP_PLANNER - dataset_info type: {type(dataset_info)}, length: {len(dataset_info) if isinstance(dataset_info, str) else 'N/A'}", level=logging.DEBUG)
|
| 814 |
deep_plan = await self.deep_planner(
|
| 815 |
deep_questions=questions.deep_questions,
|
| 816 |
dataset=dataset_info,
|
|
|
|
| 857 |
"progress": 45
|
| 858 |
}
|
| 859 |
|
| 860 |
+
logger.log_message(f"π CREATING DSPY EXAMPLES - dataset_info type: {type(dataset_info)}, length: {len(dataset_info) if isinstance(dataset_info, str) else 'N/A'}", level=logging.DEBUG)
|
| 861 |
queries = [
|
| 862 |
dspy.Example(
|
| 863 |
goal=questions.deep_questions,
|
|
|
|
| 872 |
)
|
| 873 |
for key in keys
|
| 874 |
]
|
| 875 |
+
|
| 876 |
+
# DEBUG: Log what's in each dspy.Example
|
| 877 |
+
for i, q in enumerate(queries):
|
| 878 |
+
logger.log_message(f"π DSPY EXAMPLE {i} - goal: {q.goal[:100] if hasattr(q, 'goal') else 'No goal'}...", level=logging.DEBUG)
|
| 879 |
+
logger.log_message(f"π DSPY EXAMPLE {i} - dataset: {q.dataset[:100] if hasattr(q, 'dataset') else 'No dataset'}...", level=logging.DEBUG)
|
| 880 |
+
logger.log_message(f"π DSPY EXAMPLE {i} - plan_instructions: {q.plan_instructions[:100] if hasattr(q, 'plan_instructions') else 'No plan_instructions'}...", level=logging.DEBUG)
|
| 881 |
tasks = [self.agents[key](**q) for q, key in zip(queries, keys)]
|
| 882 |
+
|
| 883 |
+
# DEBUG: Log what parameters each agent will receive
|
| 884 |
+
for q, key in zip(queries, keys):
|
| 885 |
+
logger.log_message(f"π AGENT {key} - will receive: goal={q.goal[:50] if hasattr(q, 'goal') else 'None'}..., dataset={q.dataset[:50] if hasattr(q, 'dataset') else 'None'}..., plan_instructions={q.plan_instructions[:50] if hasattr(q, 'plan_instructions') else 'None'}...", level=logging.DEBUG)
|
| 886 |
|
| 887 |
# Await all tasks to complete
|
| 888 |
summaries = []
|