from datetime import datetime, timedelta, timezone
from langchain_core.runnables import RunnableConfig
from langchain_core.tools import (
    BaseTool,
    InjectedToolArg,
    StructuredTool,
    ToolException,
    tool,
)
from data import AgentState, AgentInputState, SupervisorState, ResearchComplete, ConductResearch
from typing import Annotated, Any, Dict, List, Literal, Optional

def get_today_str() -> str:
    """Get current date formatted for display in prompts and outputs.

    Returns:
        Human-readable date string in format like 'Mon Jan 15, 2024'
    """
    # strftime's "%-d" (day without zero padding) is a glibc extension and
    # raises ValueError on Windows; format the day-of-month portably instead.
    now = datetime.now()
    return f"{now:%a %b} {now.day}, {now:%Y}"

##########################
# Reflection Tool Utils
##########################

@tool(description="Strategic reflection tool for research planning")
def think_tool(reflection: str) -> str:
    """Record a strategic reflection on research progress and next steps.

    Call this after every search to create a deliberate pause in the
    research workflow before deciding what to do next.

    Use it to answer four questions:
    1. Findings   - What concrete information have I gathered so far?
    2. Gaps       - What crucial information is still missing?
    3. Quality    - Is the evidence sufficient for a comprehensive answer?
    4. Decision   - Should I keep searching, or answer now?

    Typical moments to reflect:
    - Right after receiving search results, to extract the key facts.
    - Before planning the next step, to check whether enough is known.
    - Before concluding, to confirm a complete answer is possible.

    Args:
        reflection: Your detailed reflection on research progress, findings, gaps, and next steps

    Returns:
        Confirmation that reflection was recorded for decision-making
    """
    return "Reflection recorded: " + reflection

##########################
# Tavily Search Tool Utils
##########################
# Tool description surfaced to the LLM when it decides whether to call the
# tavily_search tool defined below.
TAVILY_SEARCH_DESCRIPTION = (
    "A search engine optimized for comprehensive, accurate, and trusted results. "
    "Useful for when you need to answer questions about current events."
)
@tool(description=TAVILY_SEARCH_DESCRIPTION)
async def tavily_search(
    queries: List[str],
    max_results: Annotated[int, InjectedToolArg] = 5,
    topic: Annotated[Literal["general", "news", "finance"], InjectedToolArg] = "general",
    config: Optional[RunnableConfig] = None
) -> str:
    """Fetch and summarize search results from Tavily search API.

    NOTE(review): the real search/summarization pipeline (Steps 1-7 below) is
    currently commented out; this stub ignores all of its arguments and
    returns the canned module-level TEMPABC text instead. Re-enable the
    commented code to restore real behavior.

    Args:
        queries: List of search queries to execute
        max_results: Maximum number of results to return per query
        topic: Topic filter for search results (general, news, or finance)
        config: Runtime configuration for API keys and model settings

    Returns:
        Formatted string containing summarized search results
    """
    # Step 1: Execute search queries asynchronously
    # search_results = await tavily_search_async(
    #     queries,
    #     max_results=max_results,
    #     topic=topic,
    #     include_raw_content=True,
    #     config=config
    # )
    
    # Step 2: Deduplicate results by URL to avoid processing the same content multiple times
    # unique_results = {}
    # for response in search_results:
    #     for result in response['results']:
    #         url = result['url']
    #         if url not in unique_results:
    #             unique_results[url] = {**result, "query": response['query']}
    
    # Step 3: Set up the summarization model with configuration
    # configurable = Configuration.from_runnable_config(config)
    
    # Character limit to stay within model token limits (configurable)
    # max_char_to_include = configurable.max_content_length
    
    # Initialize summarization model with retry logic
    # model_api_key = get_api_key_for_model(configurable.summarization_model, config)
    # summarization_model = init_chat_model(
    #     model=configurable.summarization_model,
    #     max_tokens=configurable.summarization_model_max_tokens,
    #     api_key=model_api_key,
    #     tags=["langsmith:nostream"]
    # ).with_structured_output(Summary).with_retry(
    #     stop_after_attempt=configurable.max_structured_output_retries
    # )
    
    # Step 4: Create summarization tasks (skip empty content)
    # async def noop():
    #     """No-op function for results without raw content."""
    #     return None
    
    # summarization_tasks = [
    #     noop() if not result.get("raw_content") 
    #     else summarize_webpage(
    #         summarization_model, 
    #         result['raw_content'][:max_char_to_include]
    #     )
    #     for result in unique_results.values()
    # ]
    
    # Step 5: Execute all summarization tasks in parallel
    # summaries = await asyncio.gather(*summarization_tasks)
    
    # Step 6: Combine results with their summaries
    # summarized_results = {
    #     url: {
    #         'title': result['title'], 
    #         'content': result['content'] if summary is None else summary
    #     }
    #     for url, result, summary in zip(
    #         unique_results.keys(), 
    #         unique_results.values(), 
    #         summaries
    #     )
    # }
    
    # Step 7: Format the final output
    # if not summarized_results:
    #     return "No valid search results found. Please try different search queries or use a different search API."
    
    # Placeholder output: interpolates the module-level TEMPABC sample text
    # (defined at the bottom of this file) in place of real search results.
    formatted_output = f"Search results: {TEMPABC} \n\n"
    # for i, (url, result) in enumerate(summarized_results.items()):
    #     formatted_output += f"\n\n--- SOURCE {i+1}: {result['title']} ---\n"
    #     formatted_output += f"URL: {url}\n\n"
    #     formatted_output += f"SUMMARY:\n{result['content']}\n\n"
    #     formatted_output += "\n\n" + "-" * 80 + "\n"
    
    return formatted_output

async def get_all_tools(config: RunnableConfig):
    """Assemble complete toolkit including research, search, and MCP tools.

    Args:
        config: Runtime configuration specifying search API and MCP settings

    Returns:
        List of all configured and available tools for research operations
    """
    # Tag the shared search tool so downstream routing can identify it.
    # NOTE(review): this mutates module-level state (tavily_search.metadata)
    # on every call rather than a per-call copy — confirm that is intended.
    previous_meta = tavily_search.metadata or {}
    tavily_search.metadata = dict(previous_meta, type="search", name="web_search")

    # Core research tools first, then the configured search tool(s).
    # TODO(review): search-API selection via Configuration/SearchAPI and MCP
    # tool loading are not yet wired in; only Tavily is returned for now.
    toolkit = [tool(ResearchComplete), think_tool]
    toolkit.append(tavily_search)
    return toolkit

# Placeholder data: canned "search results" text returned verbatim by the
# tavily_search stub above while the real Tavily pipeline is commented out.
# NOTE(review): remove this constant once the real search path is re-enabled.
TEMPABC = """
Here’s a summary of the **2023-2024** U.S. weather trends, based on NOAA (National Oceanic and Atmospheric Administration) and other climate reports:

### **Temperature Trends**  
1. **Above-Average Warmth** – The U.S. experienced one of the **warmest years on record**, with temperatures **1-3°F above the 20th-century average** in most regions.  
2. **Seasonal Extremes**:  
   - **Winter (2023-24)**: Milder than usual in the Northern Plains and Midwest, but cold outbreaks affected the South.  
   - **Summer (2023)**: Extreme heatwaves in the Southwest (Arizona, Texas) and Southeast, with record-breaking temperatures (e.g., Phoenix hit **110°F+ for 31 consecutive days**).  
   - **Fall/Winter Transition**: Unseasonable warmth in the Northeast, delaying snowfall.  

3. **Regional Variations**:  
   - **South/Southwest**: Persistent heat, exacerbating drought conditions.  
   - **Northwest & Northern Rockies**: Near-normal or slightly cooler temps in some areas.  

### **Precipitation Patterns**  
1. **Wet & Dry Extremes**:  
   - **Above-average rainfall**: The Northeast and Midwest faced frequent storms, leading to flooding (e.g., Vermont floods in July 2023).  
   - **Drought conditions**: The South (Texas, Louisiana) and Southwest (Arizona, New Mexico) saw prolonged dryness, worsening water shortages.  
   - **California**: A very wet winter (2023-24) from atmospheric rivers ended drought but caused flooding and landslides.  

2. **Snowfall**:  
   - **Below-average snowpack** in the Sierra Nevada early in the year but heavy late-season snow in the Rockies.  
   - **Northeast**: Some regions had less snow than usual due to warmer temps.  

3. **Severe Weather**:  
   - **Tornadoes**: Higher-than-average activity in the South and Midwest.  
   - **Hurricanes**: The Atlantic season was near-average, with **Hurricane Idalia** (Category 3) hitting Florida in August 2023.  

### **Key Takeaways**  
- **Warmer-than-normal** conditions dominated, consistent with long-term climate trends.  
- **Precipitation varied sharply**, with extreme wet events in some regions and worsening droughts in others.  
- **Increased climate volatility** led to more intense storms, heatwaves, and weather-related disasters.  

For more detailed regional breakdowns, NOAA’s **Annual Climate Report** provides further analysis. Let me know if you'd like specifics on a particular state or season!
"""