import getpass
import os
from dotenv import load_dotenv; load_dotenv()

from typing import Annotated

from typing_extensions import TypedDict

from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages

from langchain_openai import ChatOpenAI
from langchain_core.language_models import BaseChatModel

from langchain_core.tools import BaseTool
from IPython.display import Image, display

from langchain_core.runnables import RunnableConfig
from utils.my_callback import MyCustomHandlerTwo, MyCustomAsyncHandler

from langchain import hub
import json
from langchain_core.messages import ToolMessage, BaseMessage, AIMessage
from typing import Literal

from langchain_core.pydantic_v1 import BaseModel
from langgraph.checkpoint.sqlite import SqliteSaver
from langgraph.prebuilt import ToolNode, tools_condition

def _set_env(var: str):
    if not os.environ.get(var):
        os.environ[var] = getpass.getpass(f"{var}: ")

class State(TypedDict):
    # Conversation history. `add_messages` is a reducer annotation: node
    # updates returned under this key are appended to the list rather than
    # overwriting it.
    messages: Annotated[list, add_messages]
    # Set True by the chatbot node when the model invokes RequestAssistance;
    # select_next_node routes to the "human" node while this flag is set.
    ask_human: bool

# Pydantic schema bound to the LLM alongside the real tools (see bind_tools
# below). The model "calls" it to signal escalation; it is never executed.
# NOTE(review): the class docstring doubles as the tool description sent to
# the model at runtime — editing it changes model behavior, so leave it as-is.
class RequestAssistance(BaseModel):
    """Escalate the conversation to an expert. Use this if you are unable to assist directly or if the user requires support beyond your permissions.

    To use this function, relay the user's 'request' so the expert can provide the right guidance.
    """

    # The user's request, relayed verbatim so the human expert can act on it.
    request: str

def chatbot(state: State):
    """Run the tool-bound LLM on the history; flag escalation requests.

    Returns a state update with the model reply appended and `ask_human`
    set when the first tool call is a RequestAssistance invocation.
    """
    reply = llm_with_tools.invoke(state["messages"])
    wants_human = bool(
        reply.tool_calls
        and reply.tool_calls[0]["name"] == RequestAssistance.__name__
    )
    return {"messages": [reply], "ask_human": wants_human}

def search_tool(debug=False) -> BaseTool:
    """Build a Tavily web-search tool capped at two results.

    With *debug* true, fire one sample query so the raw tool output can be
    inspected before the tool is wired into the graph.
    """
    # Imported lazily so the module loads without langchain_community installed.
    from langchain_community.tools.tavily_search import TavilySearchResults

    search = TavilySearchResults(max_results=2)
    if debug:
        search.invoke("What's a 'node' in LangGraph?")
    return search

def create_response(response: str, ai_message: AIMessage):
    """Wrap *response* as a ToolMessage answering *ai_message*'s first tool call."""
    call_id = ai_message.tool_calls[0]["id"]
    return ToolMessage(content=response, tool_call_id=call_id)

def human_node(state: State):
    """Resume after the human interrupt, guaranteeing a ToolMessage reply.

    If the operator did not inject a response while the graph was paused,
    append a placeholder ToolMessage so the LLM can continue. Always clears
    the `ask_human` flag.
    """
    last = state["messages"][-1]
    additions = []
    if not isinstance(last, ToolMessage):
        # No human update arrived during the interrupt — answer the pending
        # tool call with a placeholder so the model is not left hanging.
        additions.append(create_response("No response from human.", last))
    return {"messages": additions, "ask_human": False}

def select_next_node(state: State) -> Literal["human", "tools", "__end__"]:
    """Route to human escalation when requested, else defer to tools_condition."""
    return "human" if state["ask_human"] else tools_condition(state)

def route_tools(
    state: State,
) -> Literal["tools", "__end__"]:
    """
    Conditional-edge router: send the graph to the ToolNode when the last
    message carries tool calls, otherwise finish.
    """
    # Accept either a bare message list or the usual {"messages": [...]} state.
    if isinstance(state, list):
        last_message = state[-1]
    else:
        history = state.get("messages", [])
        if not history:
            raise ValueError(f"No messages found in input state to tool_edge: {state}")
        last_message = history[-1]
    # Missing attribute and empty tool-call list are both treated as "no calls".
    if getattr(last_message, "tool_calls", None):
        return "tools"
    return "__end__"

# Optional: enable tracing in LangSmith.
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "Multi-agent Collaboration"

_set_env("BAICHUAN_API_KEY")

# Alternative backends, kept for reference:
# llm = ChatAnthropic(model="claude-3-haiku-20240307")
# llm = ChatOpenAI(model="gpt-4o-mini")
# Local Baichuan model:
# llm = ChatOpenAI(model="Baichuan2", api_key="123", base_url="http://bc.192.168.107.2.nip.io/v1")
# Doubao model (key redacted — read it from the environment if re-enabled):
# llm = ChatOpenAI(model="ep-20240801230159-vp5jn", api_key=os.environ["ARK_API_KEY"], base_url="https://ark.cn-beijing.volces.com/api/v3")
# Local Qwen model (no function-call support):
# llm = ChatOpenAI(model="qwen2:72b", api_key="Bearer api_key", base_url="http://generate.yangzhiqiang.tech/v1")

# Qwen via DashScope's OpenAI-compatible endpoint.
# SECURITY FIX: the API key was hard-coded in source; it is now taken from the
# environment (prompting if absent). The previously committed key should be
# rotated — it must be considered leaked.
_set_env("DASHSCOPE_API_KEY")
llm = ChatOpenAI(
    model="qwen-plus",
    api_key=os.environ["DASHSCOPE_API_KEY"],
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)
tool = search_tool()
# Bind the real search tool plus the RequestAssistance schema; the model may
# "call" RequestAssistance to signal escalation even though it never executes.
llm_with_tools = llm.bind_tools([tool] + [RequestAssistance])

# Fetch a reference prompt from LangChain Hub.
# BUG FIX: pretty_print() prints to stdout and returns None, so the previous
# f-string emitted "prompt: None" — call it directly instead.
prompt = hub.pull("hwchase17/openai-functions-agent")
prompt.pretty_print()
# chat_agent = prompt | llm_with_tools

if __name__ == "__main__":

    # handler2 is currently unused by the stream config; kept in case the
    # constructor registers side effects — TODO confirm and drop if not.
    handler2 = MyCustomHandlerTwo()
    handler3 = MyCustomAsyncHandler()

    graph_builder = StateGraph(State)

    graph_builder.add_node("chatbot", chatbot)
    graph_builder.add_node("tools", ToolNode(tools=[tool]))
    graph_builder.add_node("human", human_node)

    # From "chatbot", route to human escalation, tool execution, or the end.
    graph_builder.add_conditional_edges(
        "chatbot",
        select_next_node,
        {"human": "human", "tools": "tools", "__end__": "__end__"},
    )

    graph_builder.add_edge(START, "chatbot")
    # Any time a tool is called, we return to the chatbot to decide the next step.
    graph_builder.add_edge("tools", "chatbot")
    graph_builder.add_edge("human", "chatbot")

    # NOTE(review): on recent langgraph releases SqliteSaver.from_conn_string
    # returns a context manager, not a saver — confirm against the pinned version.
    memory = SqliteSaver.from_conn_string(":memory:")
    # BUG FIX: the graph was previously compiled a second time without the
    # checkpointer, which silently discarded both the checkpointer and the
    # "human" interrupt and made get_state_history()/replay below fail.
    # Compile exactly once, with both.
    graph = graph_builder.compile(
        checkpointer=memory,
        interrupt_before=["human"],
    )

    try:
        # Save the rendered graph next to this file as "<stem>_graph.png".
        stem = os.path.basename(__file__).split(".")[0]
        out_path = os.path.join(os.path.dirname(__file__), stem + "_graph.png")
        graph.get_graph().draw_mermaid_png(output_file_path=out_path)
        # display(Image(graph.get_graph().draw_mermaid_png()))
    except Exception as e:
        # Rendering needs optional extra dependencies; skip on failure.
        # BUG FIX: previously printed the Exception *class* ({Exception}),
        # not the caught error.
        print(f"Error: {e}")

    config = RunnableConfig(callbacks=[handler3], configurable={"thread_id": "1"})
    events = graph.stream(
        {
            "messages": [
                ("user", "I'm learning LangGraph. Could you do some research on it for me?")
            ]
        },
        config,
        stream_mode="values",
    )
    for event in events:
        if "messages" in event:
            event["messages"][-1].pretty_print()

    # Second user turn on the same thread.
    events = graph.stream(
        {
            "messages": [
                ("user", "Ya that's helpful. Maybe I'll build an autonomous agent with it!")
            ]
        },
        config,
        stream_mode="values",
    )
    for event in events:
        if "messages" in event:
            event["messages"][-1].pretty_print()

    # Pick a checkpoint to replay from: the state holding exactly six messages.
    to_replay = None
    for state in graph.get_state_history(config):
        print("Num Messages: ", len(state.values["messages"]), "Next: ", state.next)
        print("-" * 80)
        if len(state.values["messages"]) == 6:
            # We are somewhat arbitrarily selecting a specific state based on
            # the number of chat messages in the state.
            to_replay = state

    # BUG FIX: guard against no matching checkpoint — to_replay could be None,
    # which previously crashed on `.config`.
    if to_replay is not None:
        # The checkpoint id inside `to_replay.config` tells the checkpointer
        # which persisted state to resume from.
        for event in graph.stream(None, to_replay.config, stream_mode="values"):
            if "messages" in event:
                event["messages"][-1].pretty_print()
    else:
        print("No checkpoint with 6 messages found; skipping replay.")