from utils import get_today_str
from langgraph.graph import END, START, StateGraph
from data import AgentState, ClarifyWithUser
from langchain_core.runnables import RunnableConfig
from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    ToolMessage,
    filter_messages,
    get_buffer_string,
)
from prompts import (
    clarify_with_user_instructions,
    compress_research_simple_human_message,
    compress_research_system_prompt,
    final_report_generation_prompt,
    lead_researcher_prompt,
    research_system_prompt,
    transform_messages_into_research_topic_prompt,
)
from langgraph.types import Command
from typing import Literal

from llm import llm

async def clarify_with_user(state: AgentState, config: RunnableConfig) -> Command[Literal["write_research_brief", END]]:
    """Analyze user messages and ask clarifying questions if the research scope is unclear.

    This LangGraph node determines whether the user's request needs clarification
    before proceeding with research. If clarification is needed, the run ends with
    a clarifying question addressed to the user; otherwise it routes to the
    ``write_research_brief`` node with a short verification message.

    Args:
        state: Current agent state containing user messages.
        config: Runtime configuration with model settings and preferences.
            Currently unused, but kept to satisfy the LangGraph node signature.

    Returns:
        Command to either end with a clarifying question or proceed to the
        research brief node, appending the model's message to ``state["messages"]``.
    """
    messages = state["messages"]

    # Bind the LLM to the ClarifyWithUser schema so the response comes back as a
    # structured object (need_clarification / question / verification fields).
    clarification_model = llm.with_structured_output(ClarifyWithUser)

    # Render the full conversation plus today's date into the clarification prompt.
    prompt_content = clarify_with_user_instructions.format(
        messages=get_buffer_string(messages),
        date=get_today_str(),
    )
    response = await clarification_model.ainvoke([HumanMessage(content=prompt_content)])

    # Route based on the structured clarification analysis.
    if response.need_clarification:
        # Scope is unclear: surface the clarifying question and end the run so
        # the user can answer before research begins.
        return Command(
            goto=END,
            update={"messages": [AIMessage(content=response.question)]},
        )

    # Scope is clear: acknowledge the request and proceed to the research brief.
    return Command(
        goto="write_research_brief",
        update={"messages": [AIMessage(content=response.verification)]},
    )