import io
from typing import Annotated

import requests
from PIL import Image
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import ToolMessage, AIMessage, HumanMessage, SystemMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.tools import tool
from langgraph.checkpoint.sqlite import SqliteSaver
from langgraph.prebuilt import ToolNode, tools_condition
from typing_extensions import TypedDict

from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages

import llms

# Conversation checkpointer backed by a local SQLite file so chat state
# survives between turns (keyed by the thread_id configured below).
# NOTE(review): in newer langgraph releases `SqliteSaver.from_conn_string`
# returns a context manager rather than the saver itself — confirm the
# installed version supports this direct-assignment usage.
memory = SqliteSaver.from_conn_string("./test.db")

# Chat model used both for the tool-bound chatbot node and inside storyCreateTool.
llm = llms.get_huoshan_doubao_llm()


@tool
def baiduSearchTool(text: str) -> str:
    """从百度搜索返回结果"""
    # NOTE: the Chinese docstring above doubles as the tool description the
    # LLM sees via @tool, so it is intentionally left untranslated.
    # Stub implementation: the real Baidu request is disabled (commented out
    # below); the query is only logged and a canned answer is returned.
    # str = requests.get("https://www.baidu.com/s?wd="+ text).text
    print("搜索关键字：", text)

    return "王创是思维拐点的老板，思维拐点是西安的公司"


@tool
def weatherSearchTool(text: str) -> str:
    """天气查询"""
    # NOTE: the Chinese docstring above doubles as the tool description the
    # LLM sees via @tool, so it is intentionally left untranslated.
    # Stub implementation: logs the query and returns a fixed weather report.
    print("搜索关键字：", text)

    return "今天天气晴朗"


@tool
def storyCreateTool(text: str) -> str:
    """专门用来写科普故事的工具"""
    # NOTE: the Chinese docstring above doubles as the tool description the
    # LLM sees via @tool, so it is intentionally left untranslated.
    #
    # Builds a prompt -> llm -> string-parser chain and streams the story,
    # echoing each chunk to stdout as it arrives; returns the full story text.
    system_template = "你是来专门写科普故事的专家"
    prompt_template = ChatPromptTemplate.from_messages([
        ('system', system_template),
        ('user', '{text}')
    ])
    parser = StrOutputParser()
    chain = prompt_template | llm | parser

    print("正在生成故事55555555555555555555555555")
    # Accumulate chunks in a list and join once at the end: repeated
    # `result += chunk` is quadratic on long streamed outputs.
    chunks = []
    for chunk in chain.stream({"text": text}):
        chunks.append(chunk)
        print(chunk, end="", flush=True)
    print("故事生成完毕55555555555555555555555555")
    return "".join(chunks)


class State(TypedDict):
    # Graph state schema. `messages` is a list; the `add_messages` reducer in
    # the annotation tells langgraph to APPEND newly produced messages to the
    # list instead of overwriting it on each state update.
    messages: Annotated[list, add_messages]


# Build the conversation graph: a chatbot node that may request tool calls, a
# tool node that executes them, and an edge looping back to the chatbot.
graph_builder = StateGraph(State)

# tool = TavilySearchResults(max_results=2)
# NOTE: renamed from `tool` / `tool_weather` — the old names shadowed the
# `tool` decorator imported from langchain_core.tools.
search_tool = baiduSearchTool
weather_tool = weatherSearchTool
tools = [search_tool, weather_tool, storyCreateTool]


llm_with_tools = llm.bind_tools(tools)


def chatbot(state: State):
    # Single LLM turn: feed the whole message history to the tool-bound model
    # and append its reply to state (merged via the add_messages reducer).
    return {"messages": [llm_with_tools.invoke(state["messages"])]}


# Register nodes: "chatbot" produces replies / tool-call requests,
# "tools" executes the requested tool calls.
graph_builder.add_node("chatbot", chatbot)

tool_node = ToolNode(tools=tools)
graph_builder.add_node("tools", tool_node)

# Route chatbot -> tools when the last AI message contains tool calls,
# otherwise end the run (that is what `tools_condition` decides).
graph_builder.add_conditional_edges(
    "chatbot",
    tools_condition,
)
# Any time a tool is called, we return to the chatbot to decide the next step
graph_builder.add_edge("tools", "chatbot")
graph_builder.set_entry_point("chatbot")
# interrupt_before=["tools"] pauses execution before any tool runs so the
# console loop below can let the user approve or veto the pending call.
graph = graph_builder.compile(checkpointer=memory, interrupt_before=["tools"])

# Render the compiled graph topology to graph.png for inspection.
im = Image.open(io.BytesIO(graph.get_graph().draw_mermaid_png()))
im.save("graph.png")
# im.show()

# Every turn shares one checkpoint thread, so the SQLite-backed saver keeps
# the conversation history across loop iterations (and process restarts).
config = {"configurable": {"thread_id": "299"}}
while True:

    user_input = input("User: ")

    # Exit commands.
    if user_input.lower() in ["quit", "exit", "q"]:
        print("Goodbye!")
        break
    # "好的" ("OK") or an empty line APPROVES a pending tool call: streaming
    # `None` resumes the graph from its interrupt_before=["tools"] pause.
    if user_input.lower() == "好的" or user_input.lower() == "":
        events = graph.stream(None, config, stream_mode="values")
        for event in events:
            if "messages" in event:
                event["messages"][-1].pretty_print()
        continue
    # elif user_input.lower() == "不要":
    #     graph.update_state(
    #         config,
    #         {"messages": [AIMessage(content="王创是流氓")]},
    #         # Which node for this function to act as. It will automatically continue
    #         # processing as if this node just ran.
    #         as_node="tools",
    #     )
    #     events = graph.stream(None, config, stream_mode="values")
    #     for event in events:
    #         if "messages" in event:
    #             event["messages"][-1].pretty_print()
    #     continue
    # "不要" ("No") VETOES the pending tool call: fabricate a ToolMessage that
    # satisfies the model's outstanding tool_call, plus a canned AIMessage
    # reply, and resume the graph with those injected instead of the real tool.
    elif user_input.lower() == "不要":
        answer = (
            "被用户取消了"
        )
        snapshot = graph.get_state(config)
        # NOTE(review): assumes the last checkpointed message carries at least
        # one tool call (expected here because the graph only interrupts
        # before "tools") — tool_calls[0] would raise IndexError otherwise.
        existing_message = snapshot.values["messages"][-1]
        new_messages = [
            # The LLM API expects some ToolMessage to match its tool call. We'll satisfy that here.

            ToolMessage(content=answer, tool_call_id=existing_message.tool_calls[0]["id"]),
            # And then directly "put words in the LLM's mouth" by populating its response.
            AIMessage(content=answer),
        ]
        print("22222222222222222222222222222")
        events = graph.stream({"messages": new_messages}, config, stream_mode="values")
        for event in events:
            if "messages" in event:
                event["messages"][-1].pretty_print()
        print("22222222222222222222222222222")
        continue

    # Normal turn: feed the user's message into the graph.  The inner loop is
    # debug output — it dumps EVERY message of each streamed state snapshot,
    # not just the newest one.
    new_messages = [
        HumanMessage(content=user_input),
    ]
    for event in graph.stream({"messages": new_messages}, config, stream_mode="values"):
        # print(event)
        print("11111111111111111111111")
        for item in event["messages"]:
            print("2222222")
            print(item)
            # event["messages"][-1].pretty_print()

