from data import AgentState, AgentInputState, SupervisorState
from langchain_core.runnables import RunnableConfig
from langgraph.types import Command
from typing import Literal
from langgraph.graph import END, START, StateGraph
from langchain_core.messages import MessageLikeRepresentation
from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    ToolMessage,
    filter_messages,
    get_buffer_string,
)
import asyncio
from configuration import Configuration
from researcher_graph import researcher_subgraph

# Mock research summary (U.S. 2023-2024 weather trends). Referenced only by the
# commented-out mock `tool_results` inside supervisor_tools; kept so the graph
# can be exercised locally without invoking the researcher subgraph.
# NOTE(review): consider moving this fixture to a test/fixtures module.
ABC = """
Here’s a summary of the **2023-2024** U.S. weather trends, based on NOAA (National Oceanic and Atmospheric Administration) and other climate reports:

### **Temperature Trends**  
1. **Above-Average Warmth** – The U.S. experienced one of the **warmest years on record**, with temperatures **1-3°F above the 20th-century average** in most regions.  
2. **Seasonal Extremes**:  
   - **Winter (2023-24)**: Milder than usual in the Northern Plains and Midwest, but cold outbreaks affected the South.  
   - **Summer (2023)**: Extreme heatwaves in the Southwest (Arizona, Texas) and Southeast, with record-breaking temperatures (e.g., Phoenix hit **110°F+ for 31 consecutive days**).  
   - **Fall/Winter Transition**: Unseasonable warmth in the Northeast, delaying snowfall.  

3. **Regional Variations**:  
   - **South/Southwest**: Persistent heat, exacerbating drought conditions.  
   - **Northwest & Northern Rockies**: Near-normal or slightly cooler temps in some areas.  

### **Precipitation Patterns**  
1. **Wet & Dry Extremes**:  
   - **Above-average rainfall**: The Northeast and Midwest faced frequent storms, leading to flooding (e.g., Vermont floods in July 2023).  
   - **Drought conditions**: The South (Texas, Louisiana) and Southwest (Arizona, New Mexico) saw prolonged dryness, worsening water shortages.  
   - **California**: A very wet winter (2023-24) from atmospheric rivers ended drought but caused flooding and landslides.  

2. **Snowfall**:  
   - **Below-average snowpack** in the Sierra Nevada early in the year but heavy late-season snow in the Rockies.  
   - **Northeast**: Some regions had less snow than usual due to warmer temps.  

3. **Severe Weather**:  
   - **Tornadoes**: Higher-than-average activity in the South and Midwest.  
   - **Hurricanes**: The Atlantic season was near-average, with **Hurricane Idalia** (Category 3) hitting Florida in August 2023.  

### **Key Takeaways**  
- **Warmer-than-normal** conditions dominated, consistent with long-term climate trends.  
- **Precipitation varied sharply**, with extreme wet events in some regions and worsening droughts in others.  
- **Increased climate volatility** led to more intense storms, heatwaves, and weather-related disasters.  

For more detailed regional breakdowns, NOAA’s **Annual Climate Report** provides further analysis. Let me know if you'd like specifics on a particular state or season!
"""

def get_notes_from_tool_calls(messages: list[MessageLikeRepresentation]):
    """Collect the content of every tool message found in *messages*.

    Uses langchain's ``filter_messages`` to keep only tool-type messages,
    then returns their ``content`` fields in order of appearance.
    """
    notes = []
    for message in filter_messages(messages, include_types="tool"):
        notes.append(message.content)
    return notes

async def supervisor_tools(state: SupervisorState, config: RunnableConfig) -> Command[Literal["supervisor", "__end__"]]:
    """Execute tools called by the supervisor, including research delegation and strategic thinking.

    This function handles three types of supervisor tool calls:
    1. think_tool - Strategic reflection that continues the conversation
    2. ConductResearch - Delegates research tasks to sub-researchers
    3. ResearchComplete - Signals completion of research phase

    Args:
        state: Current supervisor state with messages and iteration count
        config: Runtime configuration with research limits and model settings

    Returns:
        Command to either continue supervision loop or end research phase
    """
    # Step 1: Extract current state and check exit conditions
    configurable = Configuration.from_runnable_config(config)

    supervisor_messages = state.get("supervisor_messages", [])
    research_iterations = state.get("research_iterations", 0)
    most_recent_message = supervisor_messages[-1]

    # getattr guard: if the latest message has no tool_calls attribute (i.e. it
    # is not an AI message), treat it as "no tool calls" and end the phase
    # instead of raising AttributeError.
    tool_calls = getattr(most_recent_message, "tool_calls", None) or []

    # Define exit criteria for research phase
    exceeded_allowed_iterations = research_iterations > configurable.max_researcher_iterations
    no_tool_calls = not tool_calls
    research_complete_tool_call = any(
        tool_call["name"] == "ResearchComplete"
        for tool_call in tool_calls
    )

    # Exit if any termination condition is met
    if exceeded_allowed_iterations or no_tool_calls or research_complete_tool_call:
        return Command(
            goto=END,
            update={
                "notes": get_notes_from_tool_calls(supervisor_messages),
                "research_brief": state.get("research_brief", ""),
            },
        )

    # Step 2: Process all tool calls together (both think_tool and ConductResearch)
    all_tool_messages = []
    update_payload = {"supervisor_messages": []}

    # Handle think_tool calls (strategic reflection): echo the reflection back
    # so the supervisor sees it recorded in the transcript.
    for tool_call in tool_calls:
        if tool_call["name"] != "think_tool":
            continue
        reflection_content = tool_call["args"]["reflection"]
        all_tool_messages.append(ToolMessage(
            content=f"Reflection recorded: {reflection_content}",
            name="think_tool",
            tool_call_id=tool_call["id"],
        ))

    # Handle ConductResearch calls (research delegation)
    conduct_research_calls = [
        tool_call for tool_call in tool_calls
        if tool_call["name"] == "ConductResearch"
    ]
    if conduct_research_calls:
        try:
            # Limit concurrent research units to prevent resource exhaustion.
            # Calls beyond the configured limit are answered with an error
            # ToolMessage instead of being executed.
            max_units = configurable.max_concurrent_research_units
            allowed_conduct_research_calls = conduct_research_calls[:max_units]
            overflow_conduct_research_calls = conduct_research_calls[max_units:]

            # Execute the allowed research tasks in parallel
            research_tasks = [
                researcher_subgraph.ainvoke({
                    "researcher_messages": [
                        HumanMessage(content=tool_call["args"]["research_topic"])
                    ],
                    "research_topic": tool_call["args"]["research_topic"]
                }, config)
                for tool_call in allowed_conduct_research_calls
            ]
            tool_results = await asyncio.gather(*research_tasks)

            # Create tool messages with research results
            for observation, tool_call in zip(tool_results, allowed_conduct_research_calls):
                all_tool_messages.append(ToolMessage(
                    content=observation.get("compressed_research", "Error synthesizing research report: Maximum retries exceeded"),
                    name=tool_call["name"],
                    tool_call_id=tool_call["id"]
                ))

            # Handle overflow research calls with error messages
            for overflow_call in overflow_conduct_research_calls:
                all_tool_messages.append(ToolMessage(
                    content=f"Error: Did not run this research as you have already exceeded the maximum number of concurrent research units. Please try again with {configurable.max_concurrent_research_units} or fewer research units.",
                    name="ConductResearch",
                    tool_call_id=overflow_call["id"]
                ))

            # Aggregate raw notes from all research results
            raw_notes_concat = "\n".join([
                "\n".join(observation.get("raw_notes", []))
                for observation in tool_results
            ])

            if raw_notes_concat:
                update_payload["raw_notes"] = [raw_notes_concat]

        except Exception as e:
            # Best-effort recovery: end the research phase with whatever notes
            # were gathered so far rather than crashing the graph run.
            print(f"supervisor_tools: ending research phase after error: {e}")
            return Command(
                goto=END,
                update={
                    "notes": get_notes_from_tool_calls(supervisor_messages),
                    "research_brief": state.get("research_brief", "")
                }
            )

    # Step 3: Return command with all tool results
    update_payload["supervisor_messages"] = all_tool_messages
    return Command(
        goto="supervisor",
        update=update_payload
    )


