from langgraph.checkpoint.memory import InMemorySaver
from langgraph.types import interrupt
from langgraph.prebuilt import create_react_agent
import json
import asyncio
from typing import Optional, Tuple, Dict, Any
from langchain_openai import ChatOpenAI
from langgraph.types import Command
from langchain_tavily import TavilySearch
from langchain_core.tools import BaseTool
import os
import requests
from typing import Callable
from langchain_core.tools import BaseTool, tool as create_tool
from langchain_core.runnables import RunnableConfig
from langgraph.types import interrupt 
from langgraph.prebuilt.interrupt import HumanInterruptConfig, HumanInterrupt

def add_human_in_the_loop(
    tool: Callable | BaseTool,
    *,
    interrupt_config: HumanInterruptConfig = None,
) -> BaseTool:
    """Wrap a tool so that every call pauses for human review via interrupt().

    Args:
        tool: A plain callable or a BaseTool instance to wrap. Callables are
            first converted with ``create_tool`` so name/description/schema
            are available.
        interrupt_config: Optional HumanInterrupt UI configuration. When
            omitted, a permissive default (accept/edit/respond all allowed)
            is used instead of sending ``config=None`` in the request.

    Returns:
        A BaseTool with the same name, description and args schema whose
        execution is gated by a human interrupt. The resume payload must be
        a dict with ``"type"`` in {"accept", "edit", "reject"}.
    """
    if not isinstance(tool, BaseTool):
        tool = create_tool(tool)

    # FIX: previously a None config was forwarded verbatim into the interrupt
    # request; default to allowing every reviewer action instead.
    if interrupt_config is None:
        interrupt_config = {
            "allow_accept": True,
            "allow_edit": True,
            "allow_respond": True,
        }

    @create_tool(
        tool.name,
        description=tool.description,
        args_schema=tool.args_schema
    )
    def call_tool_with_interrupt(config: RunnableConfig, **tool_input):
        request: HumanInterrupt = {
            "action_request": {
                "action": tool.name,
                "args": tool_input
            },
            "config": interrupt_config,
            "description": "请审核以下工具调用",
        }
        # Pauses the graph here until the client resumes with
        # Command(resume={...}); the resume payload becomes `response`.
        response = interrupt(request)
        print(f"response: {response}")

        if response["type"] == "accept":
            print("工具调用已批准，执行中...")
            print(f"调用工具: {tool.name}, 参数: {tool_input}")
            try:
                tool_response = tool.invoke(input=tool_input)
                print(tool_response)
            except Exception as e:
                print(f"工具调用失败: {e}")
                # FIX: tool_response was left unbound on failure, causing an
                # UnboundLocalError at the return below; report the error
                # back to the model instead.
                tool_response = f"工具调用失败: {e}"

        elif response["type"] == "edit":
            # The reviewer supplied replacement arguments for the call.
            tool_input = response["args"]
            try:
                tool_response = tool.invoke(input=tool_input)
                print(tool_response)
            except Exception as e:
                print(f"工具调用失败: {e}")
                # FIX: same unbound-variable defect as the accept branch.
                tool_response = f"工具调用失败: {e}"

        elif response["type"] == "reject":
            print("工具调用被拒绝，等待用户输入...")
            tool_response = '该工具被拒绝使用，请尝试其他方法或拒绝回答问题。'
        else:
            raise ValueError(f"Unsupported interrupt response type: {response['type']}")

        return tool_response

    return call_tool_with_interrupt

# Create the chat model the agent will use for reasoning and tool selection.
model = ChatOpenAI(
    model='gpt-4o-mini',
)

# Tavily search tool; calls are gated behind human review/approval.
def tavily_search(query: str, search_depth: Optional[str] = "basic"):
    """Search the web with Tavily.

    Args:
        query: The search query string.
        search_depth: Search depth, either "basic" (default) or "advanced".

    Returns:
        The Tavily results serialized as a pretty-printed JSON string
        (non-ASCII characters kept as-is).

    Raises:
        ValueError: If the Tavily API call fails; the original exception is
            chained as the cause.
    """
    try:
        # NOTE(review): langchain_tavily's TavilySearch also reads
        # TAVILY_API_KEY from the environment; confirm the `api_key`
        # kwarg name against the installed version.
        search = TavilySearch(api_key=os.environ.get("TAVILY_API_KEY"))
        search_results = search.invoke({
            "query": query,
            "search_depth": search_depth,
            "max_results": 5
        })
    except Exception as e:
        # FIX: chain the original exception (`from e`) so the root cause is
        # preserved in tracebacks instead of being swallowed.
        raise ValueError(f"Tavily API调用失败: {str(e)}") from e

    return json.dumps(search_results, ensure_ascii=False, indent=2)

def create_tavily_search_agent() -> Tuple[Any, Dict[str, Any]]:
    """Build a ReAct agent wired with a human-gated Tavily search tool.

    Returns:
        Tuple[Any, Dict[str, Any]]: an (agent, config) pair where:
            - agent: the compiled agent instance
            - config: the runnable config carrying the checkpoint thread id
    """
    # Runnable config: a fixed thread id so the in-memory checkpointer can
    # resume this conversation across interrupts.
    run_config = {"configurable": {"thread_id": "1"}}

    # Wrap the search tool so every call requires human approval.
    reviewed_search = add_human_in_the_loop(tavily_search)

    # Assemble the ReAct agent with an in-memory checkpoint saver.
    agent = create_react_agent(
        model=model,
        tools=[reviewed_search],
        checkpointer=InMemorySaver(),
    )

    return agent, run_config

async def main():
    """Create the agent and drive an interactive human-review loop.

    Starts the conversation, then resolves any tool-call interrupts by
    asking the operator to accept, reject, or edit the pending call, and
    finally prints the agent's last message.

    Returns:
        The final agent state dict.
    """
    # Build the agent and its runnable config.
    agent, config = create_tavily_search_agent()

    # FIX: the original used the synchronous agent.invoke() inside an async
    # function, blocking the event loop; use ainvoke consistently with the
    # resume calls below.
    result = await agent.ainvoke(
        {"messages": [{"role": "system", "content": "你会使用工具来帮助用户。如果工具使用被拒绝，请提示用户。"},
                      {"role": "user", "content": "帮我查找最新的哪吒3的消息"}]},
        config=config
    )

    # Keep resolving interrupts until the agent runs to completion.
    while "__interrupt__" in result:
        interrupt_info = result["__interrupt__"][0].value
        print(f"\n{interrupt_info.get('description')}: ", end="")
        print(f"请求工具: {interrupt_info.get('action_request', {}).get('action')}")
        print(f"请求参数: {interrupt_info.get('action_request', {}).get('args')}")
        print("请输入 'yes' 接受，'no' 拒绝，或 'edit' 修改查询关键词: ", end="")
        user_input = input()

        # Map the operator's decision to the matching resume command,
        # re-prompting on invalid input.
        while True:
            if user_input.lower() == "yes":
                # Operator approved the pending tool call.
                result = await agent.ainvoke(
                    Command(resume={"type": "accept"}),
                    config=config
                )
                break
            elif user_input.lower() == "no":
                # Operator rejected the tool call.
                result = await agent.ainvoke(
                    Command(resume={"type": "reject"}),
                    config=config
                )
                break
            elif user_input.lower() == "edit":
                # Operator chose to edit: collect a replacement query.
                print("请输入新的搜索内容: ", end="")
                new_query = input()
                result = await agent.ainvoke(
                    Command(resume={"type": "edit", "args": {"query": new_query}}),
                    config=config
                )
                break
            else:
                # Invalid input: prompt again.
                print("无效输入，请输入 'yes'、'no' 或 'edit': ", end="")
                user_input = input()

    # Print the final answer, if any.
    if result.get("messages"):
        print("\n=== 最终回答 ===")
        print(result['messages'][-1].content)

    return result

# Run main() only when this file is executed as a script.
if __name__ == "__main__":
    # Create an event loop and run the main coroutine to completion.
    asyncio.run(main())