from utils import get_today_str
from langgraph.graph import END, START, StateGraph
from data import AgentState, ClarifyWithUser
from langchain_core.runnables import RunnableConfig
from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    ToolMessage,
    filter_messages,
    get_buffer_string,
)
from prompts import (
    clarify_with_user_instructions,
    compress_research_simple_human_message,
    compress_research_system_prompt,
    final_report_generation_prompt,
    lead_researcher_prompt,
    research_system_prompt,
    transform_messages_into_research_topic_prompt,
)
from llm import llm

async def final_report_generation(state: AgentState, config: RunnableConfig):
    """Generate the final comprehensive research report, retrying on errors.

    Joins all collected research notes into a findings string, formats the
    final-report prompt once, and asks the LLM to write the report. The
    "notes" list is cleared in the returned state on every path (success or
    failure).

    Args:
        state: Agent state containing the research notes, research brief,
            and message history.
        config: Runtime configuration for the node (currently unused; kept
            for the LangGraph node signature).

    Returns:
        Dictionary with "final_report" (report text, or an error message on
        failure), "messages" (the message to append to history), and the
        cleared "notes" state.
    """
    # Extract research findings and prepare the state-cleanup payload that
    # every return path must include.
    notes = state.get("notes", [])
    cleared_state = {"notes": {"type": "override", "value": []}}
    findings = "\n".join(notes)

    # Build the prompt once — it is invariant across retries.
    final_report_prompt = final_report_generation_prompt.format(
        research_brief=state.get("research_brief", ""),
        messages=get_buffer_string(state.get("messages", [])),
        findings=findings,
        date=get_today_str(),
    )

    # Attempt report generation up to max_attempts times. The original loop
    # incremented a counter before each try and had an unreachable fallback
    # return after the loop; this form makes the attempt budget explicit.
    max_attempts = 4
    last_error = None
    for _attempt in range(max_attempts):
        try:
            final_report = await llm.ainvoke(
                [HumanMessage(content=final_report_prompt)]
            )
            # Success: return the report and clear the notes state.
            return {
                "final_report": final_report.content,
                "messages": [final_report],
                **cleared_state,
            }
        except Exception as e:  # broad by design: this node is a boundary;
            # errors are surfaced in the returned state, not raised.
            last_error = e

    # All attempts failed: report the last error in the state.
    return {
        "final_report": f"Error generating final report: {last_error}",
        "messages": [AIMessage(content="Report generation failed due to an error")],
        **cleared_state,
    }
