import logging
from typing import Literal

from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    ToolMessage,
    filter_messages,
    get_buffer_string,
)
from langchain_core.runnables import RunnableConfig
from langgraph.types import Command

from data import AgentState, AgentInputState, SupervisorState, ResearcherState, ResearcherOutputState
from llm import llm
from prompts import research_system_prompt
from utils import get_all_tools, get_today_str

async def researcher(state: ResearcherState, config: RunnableConfig) -> Command[Literal["researcher_tools"]]:
    """Individual researcher that conducts focused research on specific topics.

    This researcher is given a specific research topic by the supervisor and uses
    available tools (search, think_tool, MCP tools) to gather comprehensive information.
    It can use think_tool for strategic planning between searches.

    Args:
        state: Current researcher state with messages and topic context
        config: Runtime configuration with model settings and tool availability

    Returns:
        Command to proceed to researcher_tools for tool execution

    Raises:
        ValueError: If no research tools are available from the configuration.
    """
    # TODO: wire up Configuration.from_runnable_config(config) to drive model
    # selection, max tokens, structured-output retries, and the MCP prompt.
    # Until then the bare `llm` is used and mcp_prompt is empty.

    # Step 1: Load prior conversation for this researcher (empty on first turn).
    researcher_messages = state.get("researcher_messages", [])

    # Step 2: Gather all available research tools (search, MCP, think_tool).
    tools = await get_all_tools(config)
    if not tools:
        raise ValueError(
            "No tools found to conduct research: Please configure either your "
            "search API or add MCP tools to your configuration."
        )

    # Step 3: Prepare the system prompt with today's date; mcp_prompt stays
    # empty until the Configuration object is wired in (see TODO above).
    researcher_prompt = research_system_prompt.format(
        mcp_prompt="",
        date=get_today_str()
    )

    # Bind tools so the model can emit tool calls for researcher_tools to execute.
    research_model = llm.bind_tools(tools)

    # Step 4: Generate the researcher response with system context prepended.
    messages = [SystemMessage(content=researcher_prompt)] + researcher_messages
    response = await research_model.ainvoke(messages)

    # Debug visibility of the raw model response; lazy %-formatting avoids
    # building the message unless DEBUG logging is enabled.
    logging.getLogger(__name__).debug(
        "researcher: goto researcher_tools, response=%s", response
    )

    # Step 5: Append the response, bump the iteration counter, and hand off
    # to the tool-execution node.
    return Command(
        goto="researcher_tools",
        update={
            "researcher_messages": [response],
            "tool_call_iterations": state.get("tool_call_iterations", 0) + 1
        }
    )
