from utils import get_today_str
from data import AgentState, ClarifyWithUser
from langchain_core.runnables import RunnableConfig
from langgraph.types import Command
from typing import Literal
from typing import Annotated, Optional
from data import ResearchQuestion
from llm import llm
from prompts import (
transform_messages_into_research_topic_prompt,
lead_researcher_prompt
)
from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    ToolMessage,
    filter_messages,
    get_buffer_string,
)
from langgraph.graph import END, START, StateGraph

async def write_research_brief(state: AgentState, config: RunnableConfig) -> Command[Literal["research_supervisor"]]:
    """Transform user messages into a structured research brief and initialize supervisor.

    Analyzes the user's conversation so far, generates a focused research
    brief via structured LLM output, and hands off to the research supervisor
    node with a freshly initialized message context.

    Args:
        state: Current agent state containing user messages.
        config: Runtime configuration with model settings (currently unused;
            reserved for wiring a Configuration object with per-run model
            overrides, retry limits, and research-unit limits).

    Returns:
        Command routing to "research_supervisor", carrying the research brief
        and the supervisor's initial system/human messages.
    """
    # NOTE: limits are hard-coded until Configuration.from_runnable_config(config)
    # is wired through; then these should come from the configurable object.
    max_concurrent_research_units = 3
    max_researcher_iterations = 3

    # Step 1: Configure the model for structured research-question generation.
    research_model = llm.with_structured_output(ResearchQuestion)

    # Step 2: Generate the structured research brief from the user messages.
    prompt_content = transform_messages_into_research_topic_prompt.format(
        messages=get_buffer_string(state.get("messages", [])),
        date=get_today_str(),
    )
    response = await research_model.ainvoke([HumanMessage(content=prompt_content)])

    # Step 3: Build the supervisor's system prompt with the research limits.
    supervisor_system_prompt = lead_researcher_prompt.format(
        date=get_today_str(),
        max_concurrent_research_units=max_concurrent_research_units,
        max_researcher_iterations=max_researcher_iterations,
    )

    # "override" replaces any previous supervisor messages instead of appending,
    # so each new brief starts the supervisor from a clean slate.
    return Command(
        goto="research_supervisor",
        update={
            "research_brief": response.research_brief,
            "supervisor_messages": {
                "type": "override",
                "value": [
                    SystemMessage(content=supervisor_system_prompt),
                    HumanMessage(content=response.research_brief),
                ],
            },
        },
    )
