from data import AgentState, AgentInputState, SupervisorState, ResearchComplete, ConductResearch
from langchain_core.runnables import RunnableConfig
from langgraph.types import Command
from typing import Literal
from pydantic import BaseModel, Field
from llm import llm
from langchain_core.tools import (
    BaseTool,
    InjectedToolArg,
    StructuredTool,
    ToolException,
    tool,
)
from utils import think_tool


async def supervisor(state: SupervisorState, config: RunnableConfig) -> Command[Literal["supervisor_tools"]]:
    """Lead research supervisor that plans research strategy and delegates to researchers.

    The supervisor analyzes the research brief and decides how to break down the research
    into manageable tasks. It can use think_tool for strategic planning, ConductResearch
    to delegate tasks to sub-researchers, or ResearchComplete when satisfied with findings.

    Args:
        state: Current supervisor state with messages and research context
        config: Runtime configuration with model settings

    Returns:
        Command to proceed to supervisor_tools for tool execution, carrying the
        model response appended to supervisor_messages and an incremented
        research_iterations counter.
    """
    # Step 1: Configure the supervisor model with available tools.
    # Available tools: research delegation, completion signaling, and strategic thinking.
    lead_researcher_tools = [ConductResearch, ResearchComplete, think_tool]

    # TODO(review): wire in per-run model configuration (model name, max_tokens,
    # api_key, retry policy) via a Configuration object derived from `config`
    # once that plumbing exists; currently the module-level `llm` is used as-is.
    research_model = llm.bind_tools(lead_researcher_tools)

    # Step 2: Generate supervisor response based on current conversation context.
    # Defaults to an empty history if no supervisor messages exist yet.
    supervisor_messages = state.get("supervisor_messages", [])
    response = await research_model.ainvoke(supervisor_messages)

    # Step 3: Update state and proceed to tool execution. The iteration counter
    # lets downstream nodes enforce a cap on supervisor planning rounds.
    return Command(
        goto="supervisor_tools",
        update={
            "supervisor_messages": [response],
            "research_iterations": state.get("research_iterations", 0) + 1
        }
    )
