{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sentientresearchagent.framework_entry import ProfiledSentientAgent, list_available_profiles\n",
    "import pprint\n",
    "import asyncio"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "✅ Available agent profiles: ['general_agent', 'deep_research_agent']\n"
     ]
    }
   ],
   "source": [
    "# 1. (Optional) Enumerate the agent profiles that can be loaded\n",
    "profiles = list_available_profiles()\n",
    "print(f\"✅ Available agent profiles: {profiles}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001b[32m2025-06-27 15:34:04.094\u001b[0m | \u001b[1mINFO    \u001b[0m | \u001b[36msentientresearchagent.framework_entry\u001b[0m:\u001b[36mcreate_with_profile\u001b[0m:\u001b[36m564\u001b[0m - \u001b[1m🤖 Creating ProfiledSentientAgent with profile: deep_research_agent\u001b[0m\n",
      "\u001b[32m2025-06-27 15:34:04.223\u001b[0m | \u001b[1mINFO    \u001b[0m | \u001b[36msentientresearchagent.hierarchical_agent_framework.agents\u001b[0m:\u001b[36m<module>\u001b[0m:\u001b[36m6\u001b[0m - \u001b[1m🤖 Initializing YAML-based agent system...\u001b[0m\n",
      "\u001b[32m2025-06-27 15:34:04.224\u001b[0m | \u001b[1mINFO    \u001b[0m | \u001b[36msentientresearchagent.hierarchical_agent_framework.agents\u001b[0m:\u001b[36m<module>\u001b[0m:\u001b[36m42\u001b[0m - \u001b[1m✅ Agent system module loaded successfully\u001b[0m\n",
      "\u001b[32m2025-06-27 15:34:04.928\u001b[0m | \u001b[1mINFO    \u001b[0m | \u001b[36msentientresearchagent.hierarchical_agent_framework.agents.definitions.utility_agents\u001b[0m:\u001b[36m<module>\u001b[0m:\u001b[36m27\u001b[0m - \u001b[1mSuccessfully initialized ContextSummarizer_Agno with model openrouter/google/gemini-2.5-flash-lite-preview-06-17\u001b[0m\n",
      "\u001b[32m2025-06-27 15:34:05.118\u001b[0m | \u001b[34m\u001b[1mDEBUG   \u001b[0m | \u001b[36msentientresearchagent.hierarchical_agent_framework.agent_configs.agent_factory\u001b[0m:\u001b[36m<module>\u001b[0m:\u001b[36m46\u001b[0m - \u001b[34m\u001b[1mLoaded environment variables from .env file\u001b[0m\n",
      "\u001b[32m2025-06-27 15:34:05.120\u001b[0m | \u001b[33m\u001b[1mWARNING \u001b[0m | \u001b[36msentientresearchagent.config.config_utils\u001b[0m:\u001b[36mfind_config_file\u001b[0m:\u001b[36m40\u001b[0m - \u001b[33m\u001b[1mNo configuration file found, will use environment variables and defaults\u001b[0m\n",
      "\u001b[32m2025-06-27 15:34:05.120\u001b[0m | \u001b[1mINFO    \u001b[0m | \u001b[36msentientresearchagent.framework_entry\u001b[0m:\u001b[36mload_unified_config\u001b[0m:\u001b[36m82\u001b[0m - \u001b[1mLoading configuration from environment variables and defaults\u001b[0m\n",
      "\u001b[32m2025-06-27 15:34:05.121\u001b[0m | \u001b[1mINFO    \u001b[0m | \u001b[36msentientresearchagent.config.config\u001b[0m:\u001b[36mfrom_env\u001b[0m:\u001b[36m300\u001b[0m - \u001b[1mConfiguration loaded from environment variables\u001b[0m\n",
      "\u001b[32m2025-06-27 15:34:05.121\u001b[0m | \u001b[1mINFO    \u001b[0m | \u001b[36msentientresearchagent.config.config\u001b[0m:\u001b[36mvalidate_hitl_checkpoints\u001b[0m:\u001b[36m115\u001b[0m - \u001b[1mHITL checkpoint will be ignored due to hitl_root_plan_only=True\u001b[0m\n",
      "\u001b[32m2025-06-27 15:34:05.121\u001b[0m | \u001b[1mINFO    \u001b[0m | \u001b[36msentientresearchagent.config.config\u001b[0m:\u001b[36mvalidate_hitl_checkpoints\u001b[0m:\u001b[36m115\u001b[0m - \u001b[1mHITL checkpoint will be ignored due to hitl_root_plan_only=True\u001b[0m\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DEBUG: Dynamically loaded TASK_TYPES_STR: 'WRITE', 'THINK', 'SEARCH', 'AGGREGATE', 'CODE_INTERPRET', 'IMAGE_GENERATION'\n",
      "DEBUG: Dynamically loaded NODE_TYPES_STR: 'PLAN', 'EXECUTE'\n",
      "Successfully initialized PlanModifier_Agno with model openrouter/anthropic/claude-4-sonnet\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mLogging configured: level=INFO, file=sentient.log\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mConfig Override: 'enable_hitl' set to False.\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mOverriding execution.max_recursion_depth with value: 1\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 SystemManager: Initializing core systems...\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mLogging configured: level=INFO, file=sentient.log\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🛡️  SystemManager: Setting up error handling...\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m💾 SystemManager: Setting up cache system...\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mCache manager initialized: memory backend, enabled=True\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🤖 SystemManager: Initializing agent registry...\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔄 SystemManager: Integrating YAML-based agents into instance registry...\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔄 Loading YAML-based agents into instance registry...\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔄 Starting YAML agent integration with instance registry...\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mLoading agent configuration from: /Users/salahalzubi/cursor_projects/SentientResearchAgent/src/sentientresearchagent/hierarchical_agent_framework/agent_configs/agents.yaml\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mLoaded configuration for 14 agents\u001b[0m\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Both GOOGLE_API_KEY and GEMINI_API_KEY are set. Using GOOGLE_API_KEY.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m📋 Loaded configuration for 14 agents\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🚀 Creating 14 agents from configuration...\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating agent: CoreResearchPlanner (type: planner)\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating LiteLLM model: openrouter/google/gemini-2.5-pro\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created AgnoAgent for CoreResearchPlanner\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created valid BaseAdapter for CoreResearchPlanner: PlannerAdapter\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Successfully created agent: CoreResearchPlanner\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m   📋 Structured output: PlanOutput\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating agent: EnhancedSearchPlanner (type: planner)\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating LiteLLM model: openrouter/google/gemini-2.5-pro\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created AgnoAgent for EnhancedSearchPlanner\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created valid BaseAdapter for EnhancedSearchPlanner: PlannerAdapter\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Successfully created agent: EnhancedSearchPlanner\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m   📋 Structured output: PlanOutput\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating agent: EnhancedThinkPlanner (type: planner)\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating LiteLLM model: openrouter/google/gemini-2.5-pro\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created AgnoAgent for EnhancedThinkPlanner\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created valid BaseAdapter for EnhancedThinkPlanner: PlannerAdapter\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Successfully created agent: EnhancedThinkPlanner\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m   📋 Structured output: PlanOutput\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating agent: EnhancedWritePlanner (type: planner)\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating LiteLLM model: openrouter/google/gemini-2.5-pro\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created AgnoAgent for EnhancedWritePlanner\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created valid BaseAdapter for EnhancedWritePlanner: PlannerAdapter\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Successfully created agent: EnhancedWritePlanner\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m   📋 Structured output: PlanOutput\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating agent: DeepResearchPlanner (type: planner)\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating LiteLLM model: openrouter/google/gemini-2.5-pro\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created AgnoAgent for DeepResearchPlanner\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created valid BaseAdapter for DeepResearchPlanner: PlannerAdapter\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Successfully created agent: DeepResearchPlanner\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m   📋 Structured output: PlanOutput\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating agent: GeneralTaskSolver (type: planner)\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating LiteLLM model: openrouter/google/gemini-2.5-pro\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created AgnoAgent for GeneralTaskSolver\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created valid BaseAdapter for GeneralTaskSolver: PlannerAdapter\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Successfully created agent: GeneralTaskSolver\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m   📋 Structured output: PlanOutput\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating agent: SearchExecutor (type: executor)\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating OpenAI model: gpt-4.1\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created AgnoAgent for SearchExecutor\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created valid BaseAdapter for SearchExecutor: ExecutorAdapter\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Successfully created agent: SearchExecutor\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m   📋 Structured output: WebSearchResultsOutput\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m   🔧 Tools: ['DuckDuckGoTools']\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating agent: BasicReasoningExecutor (type: executor)\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating LiteLLM model: openrouter/google/gemini-2.5-pro\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created AgnoAgent for BasicReasoningExecutor\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created valid BaseAdapter for BasicReasoningExecutor: ExecutorAdapter\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Successfully created agent: BasicReasoningExecutor\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating agent: BasicReportWriter (type: executor)\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating LiteLLM model: openrouter/google/gemini-2.5-pro\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created AgnoAgent for BasicReportWriter\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created valid BaseAdapter for BasicReportWriter: ExecutorAdapter\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Successfully created agent: BasicReportWriter\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating agent: OpenAICustomSearcher (type: custom_search)\u001b[0m\n",
       "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 DEBUG: OPENAI_API_KEY from os.getenv: sk-***REDACTED***\u001b[0m\n",
       "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 DEBUG: Found env var OPENAI_API_KEY: sk-***REDACTED***\u001b[0m\n",
       "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mInitialized OpenAICustomSearchAdapter with model: gpt-4.1 (API key: sk-***REDACTED***)\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created valid BaseAdapter for OpenAICustomSearcher: OpenAICustomSearchAdapter\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Successfully created agent: OpenAICustomSearcher\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating agent: GeminiCustomSearcher (type: custom_search)\u001b[0m\n",
       "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 DEBUG: GOOGLE_API_KEY/GEMINI_API_KEY from os.getenv: AIza***REDACTED***\u001b[0m\n",
       "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 DEBUG: Found env var GOOGLE_API_KEY: AIza***REDACTED***\u001b[0m\n",
       "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 DEBUG: Found env var GEMINI_API_KEY: AIza***REDACTED***\u001b[0m\n",
       "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mInitialized GeminiCustomSearchAdapter with model: gemini-2.5-flash (API key: AIza***REDACTED***)\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created valid BaseAdapter for GeminiCustomSearcher: GeminiCustomSearchAdapter\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Successfully created agent: GeminiCustomSearcher\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating agent: DefaultAggregator (type: aggregator)\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating LiteLLM model: openrouter/google/gemini-2.5-flash\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created AgnoAgent for DefaultAggregator\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created valid BaseAdapter for DefaultAggregator: AggregatorAdapter\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Successfully created agent: DefaultAggregator\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating agent: DefaultAtomizer (type: atomizer)\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating LiteLLM model: openrouter/google/gemini-2.5-flash-lite-preview-06-17\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created AgnoAgent for DefaultAtomizer\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created valid BaseAdapter for DefaultAtomizer: AtomizerAdapter\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Successfully created agent: DefaultAtomizer\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m   📋 Structured output: AtomizerOutput\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating agent: PlanModifier (type: plan_modifier)\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 Creating LiteLLM model: openrouter/google/gemini-2.5-pro\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created AgnoAgent for PlanModifier\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Created valid BaseAdapter for PlanModifier: PlanModifierAdapter\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ Successfully created agent: PlanModifier\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m   📋 Structured output: PlanOutput\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m📊 Agent creation summary:\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m   ✅ Created: 14\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m   ⏭️  Skipped: 0\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m   ❌ Failed: 0\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🏭 Created 14 agents from configuration\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'CoreResearchPlanner' for action 'plan', task_type 'WRITE'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'CoreResearchPlanner' with name 'CoreResearchPlanner'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'CoreResearchPlanner' with name 'default_planner'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'EnhancedSearchPlanner' for action 'plan', task_type 'SEARCH'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'EnhancedSearchPlanner' with name 'EnhancedSearchPlanner'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'EnhancedSearchPlanner' with name 'search_planner'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'EnhancedThinkPlanner' for action 'plan', task_type 'THINK'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'EnhancedThinkPlanner' with name 'EnhancedThinkPlanner'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'EnhancedThinkPlanner' with name 'think_planner'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[33m\u001b[1mWARNING\u001b[0m | \u001b[33m\u001b[1mOverwriting agent 'PlannerAdapter' with 'EnhancedWritePlanner' in agent_registry for key ('plan', <TaskType.WRITE: 'WRITE'>)\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'EnhancedWritePlanner' for action 'plan', task_type 'WRITE'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'EnhancedWritePlanner' with name 'EnhancedWritePlanner'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'EnhancedWritePlanner' with name 'write_planner'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[33m\u001b[1mWARNING\u001b[0m | \u001b[33m\u001b[1mOverwriting agent 'PlannerAdapter' with 'DeepResearchPlanner' in agent_registry for key ('plan', <TaskType.WRITE: 'WRITE'>)\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'DeepResearchPlanner' for action 'plan', task_type 'WRITE'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'DeepResearchPlanner' with name 'DeepResearchPlanner'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[33m\u001b[1mWARNING\u001b[0m | \u001b[33m\u001b[1mOverwriting agent 'PlannerAdapter' with 'GeneralTaskSolver' in agent_registry for key ('plan', <TaskType.WRITE: 'WRITE'>)\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'GeneralTaskSolver' for action 'plan', task_type 'WRITE'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'GeneralTaskSolver' with name 'GeneralTaskSolver'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'SearchExecutor' with name 'SearchExecutor'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'SearchExecutor' with name 'default_search_executor'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'BasicReasoningExecutor' for action 'execute', task_type 'THINK'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'BasicReasoningExecutor' with name 'BasicReasoningExecutor'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'BasicReportWriter' for action 'execute', task_type 'WRITE'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'BasicReportWriter' with name 'BasicReportWriter'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'OpenAICustomSearchAdapter' for action 'execute', task_type 'SEARCH'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'OpenAICustomSearchAdapter' with name 'OpenAICustomSearcher'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'OpenAICustomSearchAdapter' with name 'default_openai_searcher'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[33m\u001b[1mWARNING\u001b[0m | \u001b[33m\u001b[1mOverwriting agent 'OpenAICustomSearchAdapter' with 'GeminiCustomSearchAdapter' in agent_registry for key ('execute', <TaskType.SEARCH: 'SEARCH'>)\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'GeminiCustomSearchAdapter' for action 'execute', task_type 'SEARCH'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'GeminiCustomSearchAdapter' with name 'GeminiCustomSearcher'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'GeminiCustomSearchAdapter' with name 'default_gemini_searcher'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'GeminiCustomSearchAdapter' with name 'gemini_searcher'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'DefaultAggregator' for action 'aggregate', task_type 'None'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'DefaultAggregator' with name 'default_aggregator'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'DefaultAtomizer' for action 'atomize', task_type 'None'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'DefaultAtomizer' with name 'default_atomizer'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'PlanModifier' for action 'modify_plan', task_type 'None'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Registered adapter 'PlanModifier' with name 'PlanModifier'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ YAML agent integration completed successfully\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ YAML Agent Integration Results:\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m   📋 Action keys registered: 13\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m   🏷️  Named keys registered: 22\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m   ⏭️  Skipped agents: 0\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m   ❌ Failed registrations: 0\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m📊 Final instance registry state - AGENTS: 9 entries, NAMED: 22 entries\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ YAML Integration Results: {'registered_action_keys': 13, 'registered_named_keys': 22, 'skipped_agents': 0, 'failed_registrations': 0, 'details': [{'name': 'CoreResearchPlanner', 'type': 'planner', 'action_keys_registered': [('plan', <TaskType.WRITE: 'WRITE'>)], 'named_keys_registered': ['CoreResearchPlanner', 'default_planner'], 'errors': []}, {'name': 'EnhancedSearchPlanner', 'type': 'planner', 'action_keys_registered': [('plan', <TaskType.SEARCH: 'SEARCH'>)], 'named_keys_registered': ['EnhancedSearchPlanner', 'search_planner'], 'errors': []}, {'name': 'EnhancedThinkPlanner', 'type': 'planner', 'action_keys_registered': [('plan', <TaskType.THINK: 'THINK'>)], 'named_keys_registered': ['EnhancedThinkPlanner', 'think_planner'], 'errors': []}, {'name': 'EnhancedWritePlanner', 'type': 'planner', 'action_keys_registered': [('plan', <TaskType.WRITE: 'WRITE'>)], 'named_keys_registered': ['EnhancedWritePlanner', 'write_planner'], 'errors': []}, {'name': 'DeepResearchPlanner', 'type': 'planner', 'action_keys_registered': [('plan', <TaskType.WRITE: 'WRITE'>)], 'named_keys_registered': ['DeepResearchPlanner'], 'errors': []}, {'name': 'GeneralTaskSolver', 'type': 'planner', 'action_keys_registered': [('plan', <TaskType.WRITE: 'WRITE'>)], 'named_keys_registered': ['GeneralTaskSolver'], 'errors': []}, {'name': 'SearchExecutor', 'type': 'executor', 'action_keys_registered': [], 'named_keys_registered': ['SearchExecutor', 'default_search_executor'], 'errors': []}, {'name': 'BasicReasoningExecutor', 'type': 'executor', 'action_keys_registered': [('execute', <TaskType.THINK: 'THINK'>)], 'named_keys_registered': ['BasicReasoningExecutor'], 'errors': []}, {'name': 'BasicReportWriter', 'type': 'executor', 'action_keys_registered': [('execute', <TaskType.WRITE: 'WRITE'>)], 'named_keys_registered': ['BasicReportWriter'], 'errors': []}, {'name': 'OpenAICustomSearcher', 'type': 'custom_search', 
'action_keys_registered': [('execute', <TaskType.SEARCH: 'SEARCH'>)], 'named_keys_registered': ['OpenAICustomSearcher', 'default_openai_searcher'], 'errors': []}, {'name': 'GeminiCustomSearcher', 'type': 'custom_search', 'action_keys_registered': [('execute', <TaskType.SEARCH: 'SEARCH'>)], 'named_keys_registered': ['GeminiCustomSearcher', 'default_gemini_searcher', 'gemini_searcher'], 'errors': []}, {'name': 'DefaultAggregator', 'type': 'aggregator', 'action_keys_registered': [('aggregate', None)], 'named_keys_registered': ['default_aggregator'], 'errors': []}, {'name': 'DefaultAtomizer', 'type': 'atomizer', 'action_keys_registered': [('atomize', None)], 'named_keys_registered': ['default_atomizer'], 'errors': []}, {'name': 'PlanModifier', 'type': 'plan_modifier', 'action_keys_registered': [('modify_plan', None)], 'named_keys_registered': ['PlanModifier'], 'errors': []}]}\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ SystemManager: Agent registry loaded: 9 adapters, 22 named agents\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🧠 SystemManager: Initializing display-only HAF components...\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ SystemManager: Core systems initialized successfully!\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m📊 Cache: memory backend, 0 items\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m⚙️  Execution: max 5 concurrent nodes\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔗 LLM: openai/gpt-4\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🎮 HITL Master: Disabled\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m⚙️ SystemManager: Applying agent profile 'deep_research_agent'...\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mLoading agent profile from: /Users/salahalzubi/cursor_projects/SentientResearchAgent/src/sentientresearchagent/hierarchical_agent_framework/agent_configs/profiles/deep_research_agent.yaml\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mSuccessfully loaded profile 'deep_research_agent' with 3 planner mappings and 3 executor mappings\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mUpdated self.config.active_profile_name to 'deep_research_agent'.\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mLoading agent profile from: /Users/salahalzubi/cursor_projects/SentientResearchAgent/src/sentientresearchagent/hierarchical_agent_framework/agent_configs/profiles/deep_research_agent.yaml\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mSuccessfully loaded profile 'deep_research_agent' with 3 planner mappings and 3 executor mappings\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[33m\u001b[1mWARNING\u001b[0m | \u001b[33m\u001b[1mProfile 'deep_research_agent' validation issues:\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔧 SystemManager: Creating execution components for backward compatibility...\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mHITLCoordinator initialized with config: HITL Plan Gen=False, HITL Atomizer=False, HITL Pre-Exec=False\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mNodeProcessor initialized.\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mNodeProcessor initialized without a specific Agent Blueprint.\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mHITLCoordinator initialized with config: HITL Plan Gen=False, HITL Atomizer=False, HITL Pre-Exec=False\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mNodeProcessor initialized with handlers for statuses: [<TaskStatus.READY: 'READY'>, <TaskStatus.AGGREGATING: 'AGGREGATING'>, <TaskStatus.NEEDS_REPLAN: 'NEEDS_REPLAN'>]\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mExecutionEngine initialized with provided NodeProcessor.\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mExecutionEngine initialized.\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ SystemManager: Execution components created for backward compatibility\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ SystemManager: Successfully applied profile 'deep_research_agent'. Profile is ready for project-specific execution contexts.\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m📊 Cache: memory backend, 0 items\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m⚙️  Execution: max 5 concurrent nodes\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔗 LLM: openai/gpt-4\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🎮 HITL Master: Disabled\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mSystemManager initialized and configured with profile: deep_research_agent\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mSentientAgent initialized. SystemManager active profile: deep_research_agent. HITL from config: disabled\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ ProfiledSentientAgent created successfully with profile: deep_research_agent\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "profile_to_use = \"deep_research_agent\"\n",
    "agent = ProfiledSentientAgent.create_with_profile(\n",
    "    profile_name=profile_to_use,\n",
    "    enable_hitl_override=False,  # Set to True if you want to interactively approve steps\n",
    "    max_planning_depth=1         # Optional: Override the agent's max recursion depth\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "✅ Available agent profiles: ['general_agent', 'deep_research_agent']\n"
     ]
    }
   ],
   "source": [
    "# 1. (Optional) List available profiles to see your options\n",
    "available_profiles = list_available_profiles()\n",
    "print(f\"✅ Available agent profiles: {available_profiles}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "goal = \"What are the main differences between supervised and unsupervised machine learning? Provide a concise summary.\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m[exec_74580a84] Starting execution: What are the main differences between supervised and unsupervised machine learning? Provide a concis...\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mExecutionEngine: Starting project flow with root goal: 'What are the main differences between supervised and unsupervised machine learning? Provide a concise summary.'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTaskGraph: Added graph 'root_graph'. Is root: True\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTaskGraph: Added node 'root' to graph 'root_graph'.\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mKnowledgeStore: Added/Updated record for root\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[32m\u001b[1mSUCCESS\u001b[0m | \u001b[32m\u001b[1mProjectInitializer: Initialized with root node: root in graph root_graph.\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mExecutionEngine: Project initialized. Proceeding directly to execution cycle.\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m\n",
      "--- Starting Execution Cycle (max_steps: 250) ---\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m\n",
      "--- Execution Step 1 of 250 ---\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTask root status: PENDING → READY. Result: N/A... Error: None\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mKnowledgeStore: Added/Updated record for root\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  CycleManager Transition: Node root PENDING -> READY (Goal: 'What are the main differences ...')\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  CycleManager: Found 1 READY nodes. Queueing for parallel processing.\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mNodeProcessor: Processing node root (Status: READY, Type: PLAN, Goal: 'What are the main differences ...')\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: No in-memory trace for node root, checking disk...\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Loaded trace from disk for node root: 8 stages\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Trace already exists for node root, returning existing\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  ReadyNodeHandler: Handling READY node root (Initial NodeType: PLAN, Layer: 0, Goal: 'What are the main differences ...')\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTask root status: READY → RUNNING. Result: N/A... Error: None\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    ReadyNodeHandler: Node root (Layer 0) proceeding to atomization (layer < 1).\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    🐛 DEBUG: atomize_node called for root\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root: 8 stages\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Started stage 'atomization' for node root (stage_id: fe521ca4-7b15-463d-bc1e-d40aec232bd9)\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    NodeAtomizer: Atomizing for node root (Blueprint: N/A, Goal: 'What are the main differences ...', Original Agent Name at Entry: None)\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    🐛 DEBUG: Starting atomization process for root\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    🐛 DEBUG: current_task_type_value = WRITE\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    🐛 DEBUG: context_builder_agent_name = default_atomizer\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    🐛 DEBUG: About to resolve context for agent\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mContextBuilder: Resolving context for task 'root' (Agent: default_atomizer, Type: WRITE)\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[33m\u001b[1mWARNING\u001b[0m | \u001b[33m\u001b[1mContextBuilder: No relevant context items found for task 'root'.\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    🐛 DEBUG: Context resolved successfully\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    🐛 DEBUG: lookup_name_for_atomizer = None\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    🐛 DEBUG: About to call get_agent_adapter with action_verb='atomize'\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Found adapter 'AtomizerAdapter' for key ('atomize', None) for node root.\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    🐛 DEBUG: get_agent_adapter returned: <sentientresearchagent.hierarchical_agent_framework.agents.adapters.AtomizerAdapter object at 0x1585f6c90>\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    🐛 DEBUG: Found atomizer adapter, proceeding with atomization\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    NodeAtomizer: Calling Atomizer adapter 'DefaultAtomizer' for node root ('What are the main differences ...')\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    🐛 DEBUG: About to call atomizer_adapter.process\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  AtomizerAdapter: Processing atomization task for node root\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  Adapter 'DefaultAtomizer': Processing node root (Goal: 'What are the main differences between supervised a...')\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root: 9 stages\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mUpdating existing atomization stage for node root\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔥 Falling back to old context method for DefaultAtomizer\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root: 9 stages\u001b[0m\n",
      "\u001b[32m15:34:05\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'atomization' for node root with ['agent_name', 'adapter_name', 'model_info', 'system_prompt', 'user_input', 'input_context', 'processing_parameters']\u001b[0m\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/salahalzubi/cursor_projects/SentientResearchAgent/.venv/lib/python3.12/site-packages/pydantic/main.py:463: UserWarning: Pydantic serializer warnings:\n",
      "  PydanticSerializationUnexpectedValue(Expected 9 fields but got 5: Expected `Message` - serialized value may not be as expected [input_value=Message(content='{\"is_ato...one, 'reasoning': None}), input_type=Message])\n",
      "  PydanticSerializationUnexpectedValue(Expected `StreamingChoices` - serialized value may not be as expected [input_value=Choices(finish_reason='st...finish_reason': 'STOP'}), input_type=Choices])\n",
      "  return self.__pydantic_serializer__.to_python(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 ATOMIZATION: Storing full response (134 characters) for tracing\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root: 9 stages\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'atomization' for node root with ['llm_response']\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAdapter 'DefaultAtomizer': Successfully processed. Type of actual_content_data: <class 'sentientresearchagent.hierarchical_agent_framework.context.agent_io_models.AtomizerOutput'>\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 ATOMIZATION: Storing output data (134 characters) for tracing\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root: 9 stages\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Completed stage 'atomization' for node root\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    🐛 DEBUG: atomizer_adapter.process returned: is_atomic=False updated_goal='Provide a concise summary of the main differences between supervised and unsupervised machine learning.'\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    Atomizer suggested goal refinement for root: 'What are the main differences between supervised a...' -> 'Provide a concise summary of the main differences ...'\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    Atomizer determined root as PLAN.\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    🐛 DEBUG: About to call HITL review\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    🐛 DEBUG: HITL review returned: {'status': 'approved', 'message': 'HITL skipped (master setting is disabled).'}\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root: 9 stages\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Completed stage 'atomization' for node root\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    🐛 DEBUG: Returning action_to_take: PLAN\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    ReadyNodeHandler: Atomizer for node root determined NodeType: PLAN\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    ReadyNodeHandler: Node root is NodeType.PLAN. Calling ready_plan_handler.\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    ReadyPlanHandler: Planning for node root (Blueprint: N/A, Goal: 'What are the main differences between supervised a...', Original Agent Name at Entry: None)\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root: 9 stages\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Started stage 'planning' for node root (stage_id: 6a0fa582-9a1c-4027-b873-73cebcb42d32)\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Found adapter 'PlannerAdapter' for key ('plan', <TaskType.WRITE: 'WRITE'>) for node root.\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    ReadyPlanHandler: Using PLAN adapter 'GeneralTaskSolver' for node root\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mContextBuilder: Resolving context for task 'root' (Agent: None, Type: WRITE)\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[33m\u001b[1mWARNING\u001b[0m | \u001b[33m\u001b[1mContextBuilder: No relevant context items found for task 'root'.\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  PlannerAdapter: Processing planning task for node root\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  Adapter 'GeneralTaskSolver': Processing node root (Goal: 'What are the main differences between supervised a...')\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root: 10 stages\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mUpdating existing planning stage for node root\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔥 Falling back to old context method for GeneralTaskSolver\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root: 10 stages\u001b[0m\n",
      "\u001b[32m15:34:06\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'planning' for node root with ['agent_name', 'adapter_name', 'model_info', 'system_prompt', 'user_input', 'input_context', 'processing_parameters']\u001b[0m\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/salahalzubi/cursor_projects/SentientResearchAgent/.venv/lib/python3.12/site-packages/pydantic/main.py:463: UserWarning: Pydantic serializer warnings:\n",
      "  PydanticSerializationUnexpectedValue(Expected 9 fields but got 6: Expected `Message` - serialized value may not be as expected [input_value=Message(content='```json\\...ve to the user.\\n\\n\\n'}), input_type=Message])\n",
      "  PydanticSerializationUnexpectedValue(Expected `StreamingChoices` - serialized value may not be as expected [input_value=Choices(finish_reason='st...finish_reason': 'STOP'}), input_type=Choices])\n",
      "  return self.__pydantic_serializer__.to_python(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 PLANNING: Storing full response (1405 characters) for tracing\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root: 10 stages\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'planning' for node root with ['llm_response']\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAdapter 'GeneralTaskSolver': Successfully processed. Type of actual_content_data: <class 'sentientresearchagent.hierarchical_agent_framework.context.agent_io_models.PlanOutput'>\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 PLANNING: Storing output data (1405 characters) for tracing\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root: 10 stages\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Completed stage 'planning' for node root\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m✅ ReadyPlanHandler: Plan approved for node root, creating 4 sub-tasks\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTaskGraph: Added graph 'subgraph_root'. Is root: False\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    SubNodeCreator: Created new subgraph 'subgraph_root' for parent 'root'\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTaskGraph: Added node 'root.1' to graph 'subgraph_root'.\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mKnowledgeStore: Added/Updated record for root.1\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[32m\u001b[1mSUCCESS\u001b[0m | \u001b[32m\u001b[1m      SubNodeCreator: Added sub-node: root.1 ('Define supervised machine lear...') to graph subgraph_root\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTaskGraph: Added node 'root.2' to graph 'subgraph_root'.\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mKnowledgeStore: Added/Updated record for root.2\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[32m\u001b[1mSUCCESS\u001b[0m | \u001b[32m\u001b[1m      SubNodeCreator: Added sub-node: root.2 ('Define unsupervised machine le...') to graph subgraph_root\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTaskGraph: Added node 'root.3' to graph 'subgraph_root'.\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mKnowledgeStore: Added/Updated record for root.3\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[32m\u001b[1mSUCCESS\u001b[0m | \u001b[32m\u001b[1m      SubNodeCreator: Added sub-node: root.3 ('Compare the key characteristic...') to graph subgraph_root\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTaskGraph: Added node 'root.4' to graph 'subgraph_root'.\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mKnowledgeStore: Added/Updated record for root.4\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[32m\u001b[1mSUCCESS\u001b[0m | \u001b[32m\u001b[1m      SubNodeCreator: Added sub-node: root.4 ('Synthesize the findings from t...') to graph subgraph_root\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTaskGraph: Added edge root.1 -> root.3 in graph 'subgraph_root'.\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m      SubNodeCreator: Added dependency edge: root.1 -> root.3 in graph subgraph_root\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTaskGraph: Added edge root.2 -> root.3 in graph 'subgraph_root'.\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m      SubNodeCreator: Added dependency edge: root.2 -> root.3 in graph subgraph_root\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTaskGraph: Added edge root.3 -> root.4 in graph 'subgraph_root'.\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m      SubNodeCreator: Added dependency edge: root.3 -> root.4 in graph subgraph_root\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    SubNodeCreator: Created 4 sub-nodes for parent root with specified dependencies.\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTask root status: RUNNING → PLAN_DONE. Result: N/A... Error: None\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[32m\u001b[1mSUCCESS\u001b[0m | \u001b[32m\u001b[1m✅ ReadyPlanHandler: Node root planning complete. Status: PLAN_DONE\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root: 10 stages\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Completed stage 'planning' for node root\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mNode root status changed from READY to PLAN_DONE or has new results/errors. Updating knowledge store.\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mKnowledgeStore: Added/Updated record for root\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mNodeProcessor: Finished processing for node root. Final status: PLAN_DONE\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m\n",
      "--- Execution Step 2 of 250 ---\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTask root.1 status: PENDING → READY. Result: N/A... Error: None\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mKnowledgeStore: Added/Updated record for root.1\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  CycleManager Transition: Node root.1 PENDING -> READY (Goal: 'Define supervised machine lear...')\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTask root.2 status: PENDING → READY. Result: N/A... Error: None\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mKnowledgeStore: Added/Updated record for root.2\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  CycleManager Transition: Node root.2 PENDING -> READY (Goal: 'Define unsupervised machine le...')\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  CycleManager: Found 2 READY nodes. Queueing for parallel processing.\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mNodeProcessor: Processing node root.1 (Status: READY, Type: EXECUTE, Goal: 'Define supervised machine lear...')\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: No in-memory trace for node root.1, checking disk...\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Loaded trace from disk for node root.1: 5 stages\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Trace already exists for node root.1, returning existing\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  ReadyNodeHandler: Handling READY node root.1 (Initial NodeType: EXECUTE, Layer: 1, Goal: 'Define supervised machine lear...')\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTask root.1 status: READY → RUNNING. Result: N/A... Error: None\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    ReadyNodeHandler: Node root.1 (Layer 1) is at or exceeds max_planning_layer (1). Forcing to EXECUTE and skipping atomization.\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    ReadyExecuteHandler: Executing node root.1 (Blueprint: N/A, Goal: 'Define supervised machine learning, focusing on it...', Original Agent Name at Entry: None)\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.1: 5 stages\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Started stage 'execution' for node root.1 (stage_id: 8f9a87c5-338e-4729-9401-1ca66772fced)\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mContextBuilder: Resolving context for task 'root.1' (Agent: default_executor, Type: SEARCH)\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  ParentContextStrategy: Added context from PARENT: root (Status: PLAN_DONE). How: used existing output_summary (len: 25). Final len: 25\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[32m\u001b[1mSUCCESS\u001b[0m | \u001b[32m\u001b[1mContextBuilder: Found 1 relevant context items for task 'root.1'.\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.1: 6 stages\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'execution' for node root.1 with ['input_context', 'user_input']\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Found adapter 'GeminiCustomSearchAdapter' for key ('execute', <TaskType.SEARCH: 'SEARCH'>) for node root.1.\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    ReadyExecuteHandler: Using EXECUTE adapter 'GeminiCustomSearchAdapter' for node root.1\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.1: 6 stages\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'execution' for node root.1 with ['agent_name', 'adapter_name']\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[33m\u001b[1mWARNING\u001b[0m | \u001b[33m\u001b[1mTask root.1: Potentially invalid status transition RUNNING → RUNNING\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTask root.1 status: RUNNING → RUNNING. Result: N/A... Error: None\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  Adapter 'GeminiCustomSearchAdapter': Processing node root.1 (Query: 'Define supervised machine learning, focusing on its reliance on labeled data, its primary goal of pr...') with Gemini model gemini-2.5-flash\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.1: 6 stages\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Started stage 'execution' for node root.1 (stage_id: 1499f378-5232-4d35-8825-bddb13324fff)\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mNodeProcessor: Processing node root.2 (Status: READY, Type: EXECUTE, Goal: 'Define unsupervised machine le...')\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: No in-memory trace for node root.2, checking disk...\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Loaded trace from disk for node root.2: 4 stages\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Trace already exists for node root.2, returning existing\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  ReadyNodeHandler: Handling READY node root.2 (Initial NodeType: EXECUTE, Layer: 1, Goal: 'Define unsupervised machine le...')\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTask root.2 status: READY → RUNNING. Result: N/A... Error: None\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    ReadyNodeHandler: Node root.2 (Layer 1) is at or exceeds max_planning_layer (1). Forcing to EXECUTE and skipping atomization.\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    ReadyExecuteHandler: Executing node root.2 (Blueprint: N/A, Goal: 'Define unsupervised machine learning, focusing on ...', Original Agent Name at Entry: None)\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.2: 4 stages\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Started stage 'execution' for node root.2 (stage_id: a66b8fc6-5180-4628-aa16-6756cf6e3225)\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mContextBuilder: Resolving context for task 'root.2' (Agent: default_executor, Type: SEARCH)\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  ParentContextStrategy: Added context from PARENT: root (Status: PLAN_DONE). How: used existing output_summary (len: 25). Final len: 25\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[32m\u001b[1mSUCCESS\u001b[0m | \u001b[32m\u001b[1mContextBuilder: Found 1 relevant context items for task 'root.2'.\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.2: 5 stages\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'execution' for node root.2 with ['input_context', 'user_input']\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Found adapter 'GeminiCustomSearchAdapter' for key ('execute', <TaskType.SEARCH: 'SEARCH'>) for node root.2.\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    ReadyExecuteHandler: Using EXECUTE adapter 'GeminiCustomSearchAdapter' for node root.2\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.2: 5 stages\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'execution' for node root.2 with ['agent_name', 'adapter_name']\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[33m\u001b[1mWARNING\u001b[0m | \u001b[33m\u001b[1mTask root.2: Potentially invalid status transition RUNNING → RUNNING\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTask root.2 status: RUNNING → RUNNING. Result: N/A... Error: None\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  Adapter 'GeminiCustomSearchAdapter': Processing node root.2 (Query: 'Define unsupervised machine learning, focusing on its use of unlabeled data, its primary goal of fin...') with Gemini model gemini-2.5-flash\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.2: 5 stages\u001b[0m\n",
      "\u001b[32m15:34:18\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Started stage 'execution' for node root.2 (stage_id: 465fe50b-eec6-40df-b808-7d4f7e85c4ca)\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[32m\u001b[1mSUCCESS\u001b[0m | \u001b[32m\u001b[1m    GeminiCustomSearchAdapter: Retrieved response text (length: 1268).\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[32m\u001b[1mSUCCESS\u001b[0m | \u001b[32m\u001b[1m    GeminiCustomSearchAdapter: Retrieved 7 grounding citations.\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[32m\u001b[1mSUCCESS\u001b[0m | \u001b[32m\u001b[1m  Adapter 'GeminiCustomSearchAdapter': Processed. Main output: 'Supervised machine learning is a fundamental artificial intelligence technique that involves training algorithms on *labeled data* to identify underly...', Citations processed: 7\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.1: 7 stages\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'execution' for node root.1 with ['llm_response']\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.1: 7 stages\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Completed stage 'execution' for node root.1\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.1: 7 stages\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'execution' for node root.1 with ['output_data']\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTask root.1 status: RUNNING → DONE. Result: N/A... Error: None\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[32m\u001b[1mSUCCESS\u001b[0m | \u001b[32m\u001b[1mReadyExecuteHandler: Node root.1 execution complete. Status: DONE.\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mNode root.1 status changed from READY to DONE or has new results/errors. Updating knowledge store.\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mKnowledgeStore: Added/Updated record for root.1\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mNodeProcessor: Finished processing for node root.1. Final status: DONE\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[32m\u001b[1mSUCCESS\u001b[0m | \u001b[32m\u001b[1m    GeminiCustomSearchAdapter: Retrieved response text (length: 2184).\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[32m\u001b[1mSUCCESS\u001b[0m | \u001b[32m\u001b[1m    GeminiCustomSearchAdapter: Retrieved 17 grounding citations.\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[32m\u001b[1mSUCCESS\u001b[0m | \u001b[32m\u001b[1m  Adapter 'GeminiCustomSearchAdapter': Processed. Main output: 'Unsupervised machine learning is a branch of machine learning that analyzes and interprets unlabeled datasets to discover hidden patterns, groupings, ...', Citations processed: 17\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.2: 6 stages\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'execution' for node root.2 with ['llm_response']\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.2: 6 stages\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Completed stage 'execution' for node root.2\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.2: 6 stages\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'execution' for node root.2 with ['output_data']\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTask root.2 status: RUNNING → DONE. Result: N/A... Error: None\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[32m\u001b[1mSUCCESS\u001b[0m | \u001b[32m\u001b[1mReadyExecuteHandler: Node root.2 execution complete. Status: DONE.\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mNode root.2 status changed from READY to DONE or has new results/errors. Updating knowledge store.\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mKnowledgeStore: Added/Updated record for root.2\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mNodeProcessor: Finished processing for node root.2. Final status: DONE\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m\n",
      "--- Execution Step 3 of 250 ---\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTask root.3 status: PENDING → READY. Result: N/A... Error: None\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mKnowledgeStore: Added/Updated record for root.3\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  CycleManager Transition: Node root.3 PENDING -> READY (Goal: 'Compare the key characteristic...')\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  CycleManager: Found 1 READY nodes. Queueing for parallel processing.\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mNodeProcessor: Processing node root.3 (Status: READY, Type: EXECUTE, Goal: 'Compare the key characteristic...')\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: No in-memory trace for node root.3, checking disk...\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[33m\u001b[1mWARNING\u001b[0m | \u001b[33m\u001b[1m🔍 TRACE: No trace found for node root.3 (in-memory or disk)\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Current in-memory traces: ['root', 'root.1', 'root.2']\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Created processing trace baf7ac63-0e4e-4102-b6fb-933d4a7850f6 for node root.3\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  ReadyNodeHandler: Handling READY node root.3 (Initial NodeType: EXECUTE, Layer: 1, Goal: 'Compare the key characteristic...')\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTask root.3 status: READY → RUNNING. Result: N/A... Error: None\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    ReadyNodeHandler: Node root.3 (Layer 1) is at or exceeds max_planning_layer (1). Forcing to EXECUTE and skipping atomization.\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    ReadyExecuteHandler: Executing node root.3 (Blueprint: N/A, Goal: 'Compare the key characteristics of supervised and ...', Original Agent Name at Entry: None)\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.3: 0 stages\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Started stage 'execution' for node root.3 (stage_id: b31ccb04-5464-4def-a489-eaf5f75fd267)\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mContextBuilder: Resolving context for task 'root.3' (Agent: default_executor, Type: THINK)\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  ParentContextStrategy: Added context from PARENT: root (Status: PLAN_DONE). How: used existing output_summary (len: 25). Final len: 25\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  PrerequisiteSiblingContextStrategy: Added context from PREREQUISITE SIBLING: root.1. How: used existing output_summary (len: 36). Final len: 36\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  PrerequisiteSiblingContextStrategy: Added context from PREREQUISITE SIBLING: root.2. How: used existing output_summary (len: 36). Final len: 36\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[32m\u001b[1mSUCCESS\u001b[0m | \u001b[32m\u001b[1mContextBuilder: Found 3 relevant context items for task 'root.3'.\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.3: 1 stages\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'execution' for node root.3 with ['input_context', 'user_input']\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Found adapter 'ExecutorAdapter' for key ('execute', <TaskType.THINK: 'THINK'>) for node root.3.\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    ReadyExecuteHandler: Using EXECUTE adapter 'BasicReasoningExecutor' for node root.3\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.3: 1 stages\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'execution' for node root.3 with ['agent_name', 'adapter_name']\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[33m\u001b[1mWARNING\u001b[0m | \u001b[33m\u001b[1mTask root.3: Potentially invalid status transition RUNNING → RUNNING\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTask root.3 status: RUNNING → RUNNING. Result: N/A... Error: None\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  ExecutorAdapter: Processing execution task for node root.3\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  Adapter 'BasicReasoningExecutor': Processing node root.3 (Goal: 'Compare the key characteristics of supervised and ...')\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.3: 1 stages\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mUpdating existing execution stage for node root.3\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔥 Using enhanced hierarchical context for BasicReasoningExecutor\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.3: 1 stages\u001b[0m\n",
      "\u001b[32m15:34:29\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'execution' for node root.3 with ['agent_name', 'adapter_name', 'model_info', 'system_prompt', 'user_input', 'input_context', 'processing_parameters']\u001b[0m\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/salahalzubi/cursor_projects/SentientResearchAgent/.venv/lib/python3.12/site-packages/pydantic/main.py:463: UserWarning: Pydantic serializer warnings:\n",
      "  PydanticSerializationUnexpectedValue(Expected 9 fields but got 6: Expected `Message` - serialized value may not be as expected [input_value=Message(content='# Analys... the structure.\\n\\n\\n\"}), input_type=Message])\n",
      "  PydanticSerializationUnexpectedValue(Expected `StreamingChoices` - serialized value may not be as expected [input_value=Choices(finish_reason='st...finish_reason': 'STOP'}), input_type=Choices])\n",
      "  return self.__pydantic_serializer__.to_python(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 EXECUTION: Storing full response (4808 characters) for tracing\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.3: 1 stages\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'execution' for node root.3 with ['llm_response']\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAdapter 'BasicReasoningExecutor': Successfully processed. Type of actual_content_data: <class 'str'>\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 EXECUTION: Storing output data (2043 characters) for tracing\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.3: 1 stages\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Completed stage 'execution' for node root.3\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    ExecutorAdapter: Wrapping raw string output in standardized dictionary for node root.3\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.3: 1 stages\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'execution' for node root.3 with ['output_data']\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTask root.3 status: RUNNING → DONE. Result: N/A... Error: None\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[32m\u001b[1mSUCCESS\u001b[0m | \u001b[32m\u001b[1mReadyExecuteHandler: Node root.3 execution complete. Status: DONE.\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mNode root.3 status changed from READY to DONE or has new results/errors. Updating knowledge store.\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mKnowledgeStore: Added/Updated record for root.3\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mNodeProcessor: Finished processing for node root.3. Final status: DONE\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m\n",
      "--- Execution Step 4 of 250 ---\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTask root.4 status: PENDING → READY. Result: N/A... Error: None\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mKnowledgeStore: Added/Updated record for root.4\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  CycleManager Transition: Node root.4 PENDING -> READY (Goal: 'Synthesize the findings from t...')\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  CycleManager: Found 1 READY nodes. Queueing for parallel processing.\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mNodeProcessor: Processing node root.4 (Status: READY, Type: EXECUTE, Goal: 'Synthesize the findings from t...')\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: No in-memory trace for node root.4, checking disk...\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[33m\u001b[1mWARNING\u001b[0m | \u001b[33m\u001b[1m🔍 TRACE: No trace found for node root.4 (in-memory or disk)\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Current in-memory traces: ['root', 'root.1', 'root.2', 'root.3']\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Created processing trace 27d4763c-17d6-4331-ae57-2ece5c8d476c for node root.4\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  ReadyNodeHandler: Handling READY node root.4 (Initial NodeType: EXECUTE, Layer: 1, Goal: 'Synthesize the findings from t...')\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTask root.4 status: READY → RUNNING. Result: N/A... Error: None\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    ReadyNodeHandler: Node root.4 (Layer 1) is at or exceeds max_planning_layer (1). Forcing to EXECUTE and skipping atomization.\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    ReadyExecuteHandler: Executing node root.4 (Blueprint: N/A, Goal: 'Synthesize the findings from the previous steps in...', Original Agent Name at Entry: None)\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.4: 0 stages\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Started stage 'execution' for node root.4 (stage_id: 0379c36b-7e32-44b1-9824-a598ec6e7d74)\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mContextBuilder: Resolving context for task 'root.4' (Agent: default_executor, Type: WRITE)\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  ParentContextStrategy: Added context from PARENT: root (Status: PLAN_DONE). How: used existing output_summary (len: 25). Final len: 25\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  PrerequisiteSiblingContextStrategy: Added context from PREREQUISITE SIBLING: root.1. How: used existing output_summary (len: 36). Final len: 36\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  PrerequisiteSiblingContextStrategy: Added context from PREREQUISITE SIBLING: root.2. How: used existing output_summary (len: 36). Final len: 36\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  PrerequisiteSiblingContextStrategy: Added context from PREREQUISITE SIBLING: root.3. How: used existing output_summary (len: 36). Final len: 36\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[32m\u001b[1mSUCCESS\u001b[0m | \u001b[32m\u001b[1mContextBuilder: Found 4 relevant context items for task 'root.4'.\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.4: 1 stages\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'execution' for node root.4 with ['input_context', 'user_input']\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Found adapter 'ExecutorAdapter' for key ('execute', <TaskType.WRITE: 'WRITE'>) for node root.4.\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    ReadyExecuteHandler: Using EXECUTE adapter 'BasicReportWriter' for node root.4\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.4: 1 stages\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'execution' for node root.4 with ['agent_name', 'adapter_name']\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[33m\u001b[1mWARNING\u001b[0m | \u001b[33m\u001b[1mTask root.4: Potentially invalid status transition RUNNING → RUNNING\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTask root.4 status: RUNNING → RUNNING. Result: N/A... Error: None\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  ExecutorAdapter: Processing execution task for node root.4\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  Adapter 'BasicReportWriter': Processing node root.4 (Goal: 'Synthesize the findings from the previous steps in...')\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.4: 1 stages\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mUpdating existing execution stage for node root.4\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔥 Using enhanced hierarchical context for BasicReportWriter\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.4: 1 stages\u001b[0m\n",
      "\u001b[32m15:34:48\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'execution' for node root.4 with ['agent_name', 'adapter_name', 'model_info', 'system_prompt', 'user_input', 'input_context', 'processing_parameters']\u001b[0m\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/salahalzubi/cursor_projects/SentientResearchAgent/.venv/lib/python3.12/site-packages/pydantic/main.py:463: UserWarning: Pydantic serializer warnings:\n",
      "  PydanticSerializationUnexpectedValue(Expected 9 fields but got 6: Expected `Message` - serialized value may not be as expected [input_value=Message(content=\"The prim...r the question.\\n\\n\\n\"}), input_type=Message])\n",
      "  PydanticSerializationUnexpectedValue(Expected `StreamingChoices` - serialized value may not be as expected [input_value=Choices(finish_reason='st...finish_reason': 'STOP'}), input_type=Choices])\n",
      "  return self.__pydantic_serializer__.to_python(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 EXECUTION: Storing full response (1019 characters) for tracing\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.4: 1 stages\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'execution' for node root.4 with ['llm_response']\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAdapter 'BasicReportWriter': Successfully processed. Type of actual_content_data: <class 'str'>\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 EXECUTION: Storing output data (1019 characters) for tracing\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.4: 1 stages\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Completed stage 'execution' for node root.4\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    ExecutorAdapter: Wrapping raw string output in standardized dictionary for node root.4\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root.4: 1 stages\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'execution' for node root.4 with ['output_data']\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTask root.4 status: RUNNING → DONE. Result: N/A... Error: None\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[32m\u001b[1mSUCCESS\u001b[0m | \u001b[32m\u001b[1mReadyExecuteHandler: Node root.4 execution complete. Status: DONE.\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mNode root.4 status changed from READY to DONE or has new results/errors. Updating knowledge store.\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mKnowledgeStore: Added/Updated record for root.4\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mNodeProcessor: Finished processing for node root.4. Final status: DONE\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m\n",
      "--- Execution Step 5 of 250 ---\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mNode root can AGGREGATE: All 4 sub-tasks in 'subgraph_root' are finished.\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTask root status: PLAN_DONE → AGGREGATING. Result: N/A... Error: None\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mKnowledgeStore: Added/Updated record for root\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  CycleManager Transition: Node root PLAN_DONE -> AGGREGATING (Goal: 'What are the main differences ...')\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m\n",
      "--- Execution Step 6 of 250 ---\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  CycleManager: Processing AGGREGATING Node: root (Layer: 0, Goal: 'What are the main differences ...')\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mNodeProcessor: Processing node root (Status: AGGREGATING, Type: PLAN, Goal: 'What are the main differences ...')\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root: 10 stages\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Trace already exists for node root, returning existing\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  AggregatingNodeHandler: Handling AGGREGATING node root (Blueprint: N/A, Goal: 'What are the main differences ...', Original Agent Name at Entry: None)\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[33m\u001b[1mWARNING\u001b[0m | \u001b[33m\u001b[1mTask root: Potentially invalid status transition AGGREGATING → RUNNING\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTask root status: AGGREGATING → RUNNING. Result: N/A... Error: None\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mIncluding FULL child content (208 words, 1531 chars)\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    Child root.1: full (1531 chars)\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mIncluding FULL child content (326 words, 2476 chars)\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    Child root.2: full (2476 chars)\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mIncluding FULL child content (699 words, 5193 chars)\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    Child root.3: full (5193 chars)\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mIncluding FULL child content (181 words, 1353 chars)\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    Child root.4: full (1353 chars)\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  Total child content for aggregation: 10553 chars from 4 children\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAgentRegistry: Found adapter 'AggregatorAdapter' for key ('aggregate', None) for node root.\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m    AggregatingNodeHandler: Invoking AGGREGATE adapter 'DefaultAggregator' for root\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  AggregatorAdapter: Processing aggregation task for node root\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  Adapter 'DefaultAggregator': Processing node root (Goal: 'What are the main differences between supervised a...')\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[33m\u001b[1mWARNING\u001b[0m | \u001b[33m\u001b[1m🚨 ADAPTER MISMATCH: Node root (status: RUNNING) is using aggregator adapter 'AggregatorAdapter' but not in AGGREGATING status!\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root: 10 stages\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mCreating new execution stage for node root\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root: 10 stages\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Started stage 'execution' for node root (stage_id: e47c559b-43f3-4f43-872c-5de3161bab86)\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔥 Falling back to old context method for DefaultAggregator\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root: 11 stages\u001b[0m\n",
      "\u001b[32m15:34:56\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'execution' for node root with ['agent_name', 'adapter_name', 'model_info', 'system_prompt', 'user_input', 'input_context', 'processing_parameters']\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[33m\u001b[1mWARNING\u001b[0m | \u001b[33m\u001b[1m🚨 ADAPTER MISMATCH: Node root (status: RUNNING) is using aggregator adapter 'AggregatorAdapter' but not in AGGREGATING status!\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 EXECUTION: Truncated response from 6262 to 5000 characters for tracing\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root: 11 stages\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Updated stage 'execution' for node root with ['llm_response']\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mAdapter 'DefaultAggregator': Successfully processed. Type of actual_content_data: <class 'str'>\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[33m\u001b[1mWARNING\u001b[0m | \u001b[33m\u001b[1m🚨 ADAPTER MISMATCH: Node root (status: RUNNING) is using aggregator adapter 'AggregatorAdapter' but not in AGGREGATING status!\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 EXECUTION: Storing output data (2043 characters) for tracing\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Retrieved in-memory trace for node root: 11 stages\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m🔍 TRACE: Completed stage 'execution' for node root\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mTask root status: RUNNING → DONE. Result: The main differences between supervised and unsupe... Error: None\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[32m\u001b[1mSUCCESS\u001b[0m | \u001b[32m\u001b[1m    AggregatingNodeHandler: Node root aggregation complete. Status: DONE.\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mNode root status changed from AGGREGATING to DONE or has new results/errors. Updating knowledge store.\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mKnowledgeStore: Added/Updated record for root\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1mNodeProcessor: Finished processing for node root. Final status: DONE\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[32m\u001b[1mSUCCESS\u001b[0m | \u001b[32m\u001b[1m\n",
      "--- Execution Finished: No active nodes left. ---\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m\n",
      "--- Final Node Statuses & Results ---\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  Node: root            Layer: 0 Status: DONE         Goal: 'What are the main differences between su...' Result: The main differences between supervised and unsupervised machine learning are profound and stem primarily from the nature of the data they process and..., OutputSummary: Planned with 4 sub-tasks.\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  Node: root.1          Layer: 1 Status: DONE         Goal: 'Define supervised machine learning, focu...' Result: {'output_text': 'Supervised machine learning is a fundamental artificial '\n",
      "                'intelligence technique that involves training algorithms o..., OutputSummary: Execution completed. Data type: dict\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  Node: root.2          Layer: 1 Status: DONE         Goal: 'Define unsupervised machine learning, fo...' Result: {'output_text': 'Unsupervised machine learning is a branch of machine learning '\n",
      "                'that analyzes and interprets unlabeled datasets to d..., OutputSummary: Execution completed. Data type: dict\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  Node: root.3          Layer: 1 Status: DONE         Goal: 'Compare the key characteristics of super...' Result: {'output_text': '# Analysis Summary\\n'\n",
      "                '\\n'\n",
      "                '## Key Findings\\n'\n",
      "                '*   The fundamental distinction lies ..., OutputSummary: Execution completed. Data type: dict\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m  Node: root.4          Layer: 1 Status: DONE         Goal: 'Synthesize the findings from the previou...' Result: {'output_text': 'The primary difference between supervised and unsupervised '\n",
      "                'machine learning lies in the nature of the input data t..., OutputSummary: Execution completed. Data type: dict\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[33m\u001b[1mWARNING\u001b[0m | \u001b[33m\u001b[1mNo configuration file found, will use environment variables and defaults\u001b[0m\n",
      "\u001b[32m15:35:04\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[1m[exec_74580a84] Execution completed: 5 nodes, 59.00s\u001b[0m\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/salahalzubi/cursor_projects/SentientResearchAgent/.venv/lib/python3.12/site-packages/pydantic/main.py:463: UserWarning: Pydantic serializer warnings:\n",
      "  PydanticSerializationUnexpectedValue(Expected 9 fields but got 5: Expected `Message` - serialized value may not be as expected [input_value=Message(content='The main...one, 'reasoning': None}), input_type=Message])\n",
      "  PydanticSerializationUnexpectedValue(Expected `StreamingChoices` - serialized value may not be as expected [input_value=Choices(finish_reason='st...finish_reason': 'STOP'}), input_type=Choices])\n",
      "  return self.__pydantic_serializer__.to_python(\n"
     ]
    }
   ],
   "source": [
    "result = agent.execute(goal=goal)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The main differences between supervised and unsupervised machine learning are profound and stem primarily from the nature of the data they process and, consequently, their objectives and applications.\n",
      "\n",
      "**Supervised Machine Learning:**\n",
      "Supervised machine learning is a fundamental artificial intelligence technique that involves training algorithms on **labeled data**. This means that each piece of input data in the training set is explicitly paired with the correct or desired output. The algorithm learns by identifying underlying patterns and relationships between these inputs and their corresponding outputs.\n",
      "\n",
      "The primary goal of supervised learning is either **prediction** or **classification**:\n",
      "*   **Prediction (Regression)**: Aims to predict a continuous numerical value. For instance, predicting house prices based on features like size and location, or forecasting sales figures. Algorithms like **Linear Regression** model the relationship between a dependent variable and one or more independent variables through a linear equation to achieve this.\n",
      "*   **Classification**: Aims to predict a categorical label. Examples include classifying emails as \"spam\" or \"not spam,\" or identifying different types of objects in images. **Decision Trees** are common algorithms used for both classification and regression, splitting data into subsets based on attributes to form a decision-based outcome.\n",
      "\n",
      "Supervised learning excels at well-defined predictive tasks where historical labeled data is available, and its performance can be easily measured against known ground truth. However, it requires significant upfront investment in data collection, cleaning, and accurate labeling, which can be a major bottleneck.\n",
      "\n",
      "**Unsupervised Machine Learning:**\n",
      "In contrast, unsupervised machine learning analyzes and interprets **unlabeled datasets** to discover hidden patterns, groupings, or structures without any explicit guidance or pre-defined outputs. Unlike supervised learning, where data comes with specific categories or outcomes, unsupervised algorithms work independently to find inherent relationships within the data.\n",
      "\n",
      "The primary goal of unsupervised learning is **discovery**, to uncover intrinsic structures and insights that might not be immediately obvious. This includes tasks such as:\n",
      "*   **Clustering**: Grouping similar data points together based on their inherent characteristics. This helps in identifying natural groupings in data, often used for customer segmentation or document clustering. **K-Means Clustering** is a common algorithm that groups unlabeled data points into a pre-defined number of clusters (K) by minimizing the distance between data points and their assigned cluster's centroid.\n",
      "*   **Association**: Discovering rules or relationships between variables in a dataset, such as items frequently bought together (e.g., in market basket analysis).\n",
      "*   **Dimensionality Reduction**: Simplifying complex data by reducing the number of features or variables while retaining essential information. This can make data easier to visualize and analyze, and can also speed up other machine learning algorithms by reducing model complexity. **Principal Component Analysis (PCA)** is a dimensionality reduction technique that transforms a dataset with many variables into a smaller set of uncorrelated variables called principal components, identifying directions that capture the most variance.\n",
      "\n",
      "Unsupervised learning enables the discovery of novel insights and patterns without preconceived notions, making it essential for exploratory data analysis, especially when labeling data is impractical or impossible. However, its results can be more subjective and harder to evaluate, as there is no \"correct\" answer to compare against, and the utility of discovered patterns is not always immediately clear.\n",
      "\n",
      "**Side-by-Side Comparison:**\n",
      "\n",
      "| Feature               | Supervised Learning                                      | Unsupervised Learning                                     |\n",
      "| :-------------------- | :------------------------------------------------------- | :-------------------------------------------------------- |\n",
      "| **Input Data**        | **Labeled data**: Each data point has a known output/label. | **Unlabeled data**: No pre-defined outputs or labels.     |\n",
      "| **Goal/Objective**    | **Prediction**: To learn a mapping function from input to output to predict future outcomes for new, unseen data. | **Discovery**: To model underlying structure or distribution, finding hidden patterns, groupings, or insights within the data. |\n",
      "| **Common Tasks**      | **Classification**: Predicting categorical labels (e.g., spam detection, image recognition). | **Clustering**: Grouping similar data points (e.g., customer segmentation). |\n",
      "|                       | **Regression**: Predicting continuous numerical values (e.g., house price forecasting, sales prediction). | **Dimensionality Reduction**: Reducing variables while preserving information (e.g., data compression, visualization). |\n",
      "|                       |                                                          | **Association**: Discovering relationships between variables (e.g., market basket analysis). |\n",
      "| **Example Algorithms** | Linear Regression, Decision Trees, Support Vector Machines. | K-Means Clustering, Principal Component Analysis (PCA).   |\n",
      "| **Benefits**          | High accuracy for well-defined predictive tasks. Easily measurable performance. | Discover novel insights without preconceived notions. Essential for exploratory data analysis. |\n",
      "| **Costs/Challenges**  | Requires significant investment in data labeling (time, resources). | Results can be subjective and harder to evaluate; no \"correct\" answer. |\n",
      "\n",
      "In summary, the fundamental distinction lies in the **input data**: supervised learning requires labeled data for explicit prediction or classification, while unsupervised learning operates on unlabeled data to discover hidden patterns and structures. This core difference dictates their respective goals and applications, with supervised learning being task-driven and unsupervised learning being data-driven. While distinct, hybrid approaches like semi-supervised learning also exist, leveraging both labeled and unlabeled data.\n"
     ]
    }
   ],
   "source": [
    "print(result.get(\"final_output\"))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
