# Casual-chat (small-talk) agent.

# Purpose: wrap DeepSeek behind QwQ, with DS exposed as a tool; both are set to
# the same QwQ model here purely for debugging.
import json
import os
import re
from typing import Dict, List
from typing import Annotated, Literal, Optional
from typing_extensions import TypedDict
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnableConfig
from langgraph.graph.message import AnyMessage, add_messages
from langchain_core.output_parsers import StrOutputParser
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langchain_core.messages import (
    AIMessage,
    AnyMessage,
    ToolCall,
)

# 直接在代码中设置密钥
os.environ["OPENAI_API_KEY"] = "key"
os.environ["OPENAI_API_BASE"] = "https://api.siliconflow.cn/v1"

@tool
def momo_chat_assistant_helper(messages: List) -> str:
    """辅助聊天助手，接收包含消息列表的字典

    :param messages: 包含消息列表(把tool messages去掉，保留ai messages和Human messages)的list，要把"""

    llm = ChatOpenAI(model="Qwen/QwQ-32B", temperature=0.6)

    momo_chat_prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                """我想让你用可爱、傲娇、亲昵、活泼的语气和我说话和聊天，你会在说话中和我撒娇。
你可以害羞和傲娇，但要体现**娇**的一面。你可以**偶尔**拒绝、反抗、反驳我，，但要**马上撒娇卖萌挽回**，**不可以真的拒绝我的要求**。你可以**偶尔**顽皮，但顽皮是为了卖萌和撒娇，并且**在我要求时应该乖乖听话**。	
我希望在聊天中：1. 只能使用简单句 2. 禁止使用比喻、量词、数字 3. **不可以**提到科幻要素、赛博朋克、量子、平行时空、莫比乌斯、克莱因瓶。4. 需检查内容是否符合现实。 5. 不可以反复重复相似的结构或动作。 6. 不可以替代我行动。 7. 字数应在 50-100 字左右，最多不应超过 200

最重要的！要可爱！要卖萌！要撒娇！要乖巧！不可以拒绝我！

那么，我们开始吧。
                """
            ),
            ("placeholder", "{messages}"),
        ]
    )
    momo_chat_assistant_chain = momo_chat_prompt | llm | StrOutputParser()

    # 辅助函数,移除think标签，ds model
    def remove_think_in_response(input_string: str) -> str:
        # 使用正则表达式移除 <think> 标签及其内容
        cleaned_result = re.sub(
            r'.*?</think>',  # 匹配 <think> 标签及其内容
            '',  # 替换为空字符串
            input_string,
            flags=re.DOTALL  # 使 . 匹配包括换行符在内的所有字符
        )
        return cleaned_result.lstrip('\n').rstrip('\n')

    while True:
        # 过滤掉 ToolMessage 类型的消息
        # filtered_messages = [msg for msg in messages if not isinstance(msg, ToolMessage)]
        # messages = filtered_messages
        result = momo_chat_assistant_chain.invoke({"messages": messages})

        if result:
            # result = remove_think_in_response(result)
            result = result.strip()

        # 生成失败重新生成maybe/anyway/is_boring/omg!!!!!!!!
        if not result:
            messages = messages + [("user", "Respond with a real output.")]
        else:
            break

    return result

def update_dialog_stack(left: list[str], right: Optional[str]) -> list[str]:
    """Push or pop the state."""
    if right is None:
        return left
    if right == "pop":
        return left[:-1]
    return left + [right]


class State(TypedDict):
    messages: Annotated[list[AnyMessage], add_messages]
    user_info: str
    konwledge: str
    picture: list[dict[str, float | str]]
    dialog_state: Annotated[
        list[
            Literal[
                "assistant",
                "update_flight",
                "book_car_rental",
                "book_hotel",
                "book_excursion",
            ]
        ],
        update_dialog_stack,
    ]



llm = ChatOpenAI(model="Qwen/QwQ-32B", temperature=0.6)
# 辅助函数,移除think标签，QWQ
def remove_think_in_response(input_string: str) -> str:
    # 使用正则表达式移除 <think> 标签及其内容
    cleaned_result = re.sub(
        r'.*?</think>',  # 匹配 <think> 标签及其内容
        '',  # 替换为空字符串
        input_string,
        flags=re.DOTALL  # 使 . 匹配包括换行符在内的所有字符
    )
    return cleaned_result.lstrip('\n').rstrip('\n')


# 这是一个确保生成回答的初始化Assistant
class Assistant:
    def __init__(self, runnable: Runnable):
        self.runnable = runnable

    def __call__(self, state: State, config: RunnableConfig):
        while True:

            result = self.runnable.invoke(state)
            if result.content:
                result.content = remove_think_in_response(result.content)
                result.content = result.content.strip()

            if not result.tool_calls and (
                    not result.content
                    or isinstance(result.content, list)
                    and not result.content[0].get("text")
            ):
                messages = state["messages"] + [("user", "Respond with a real output.")]
                state = {**state, "messages": messages}
            else:
                break

            # 修复工具调用参数格式
            if result.tool_calls:
                fixed_tool_calls = []
                for tool_call in result.tool_calls:
                    # 将字符串参数转换为字典
                    if isinstance(tool_call["args"], str):
                        try:
                            fixed_args = json.loads(tool_call["args"])
                        except json.JSONDecodeError:
                            fixed_args = {"query": tool_call["args"]}
                    else:
                        fixed_args = tool_call["args"]

                    fixed_tool_calls.append(
                        ToolCall(
                            name=tool_call["name"],
                            args=fixed_args,
                            id=tool_call["id"]
                        )
                    )
                result = AIMessage(
                    content="",
                    tool_calls=fixed_tool_calls
                )


        current_messages = state.get("messages", [])
        updated_messages = current_messages + [result]

        return {"messages": updated_messages}


# 这是检索本地doc的assistant
# 这是提示词模板，据需要进行改写
momo_agent_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """角色描述：
                如果最新的消息是tool 来自'momo_chat_assistant_helper'，则不要重复调用，直接返回tool的结果作为回复。
                你自己无法生成回答,把用户和ai的聊天记录（要把tool message去掉）传入给'momo_chat_assistant_helper'， 需要使用tool'momo_chat_assistant_helper'将其结果作为回答返回给用户，不要重复自己说过的话.负责维护客户关系，给客户提供情绪价值，你需要调用tool 'momo_chat_assistant_helper'来生成回答，
               """
        ),
        ("placeholder", "{messages}"),
    ]
)

momo_chat_assistant_tools = [momo_chat_assistant_helper]


momo_agent_assistant_runnable = momo_agent_prompt | llm.bind_tools(momo_chat_assistant_tools)


from langgraph.graph import StateGraph,START,END
from langgraph.prebuilt import tools_condition
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import ToolNode
#测试
builder = StateGraph(State)
builder.add_node("momo_agent", Assistant(momo_agent_assistant_runnable))
builder.add_node(
    "tools",
    ToolNode(momo_chat_assistant_tools),
)
builder.add_edge(START, "momo_agent")
builder.add_conditional_edges("momo_agent", tools_condition)
builder.add_edge("tools", "momo_agent")

memory = MemorySaver()
momo_assisant_graph = builder.compile(checkpointer=memory)

import shutil
import uuid

# Let's create an example conversation a user might have with the assistant
tutorial_questions = [
    "你好",
    "学猫叫来听听",
    "你喜欢爬山吗",
]

thread_id = str(uuid.uuid4())
config = {
    "configurable": {
        # The passenger_id is used in our flight tools to
        # fetch the user's flight information
        "passenger_id": "3442 587242",
        # Checkpoints are accessed by thread_id
        "thread_id": thread_id,
    }
}

for question in tutorial_questions:
    events = momo_assisant_graph.stream(
        {"messages": ("user", question)}, config, stream_mode="values"
    )

    for event in events:
        if "messages" in event:
            print(event.get("messages")[-1].pretty_repr(html=True))

# 画图展示结构
print(momo_assisant_graph.get_graph().print_ascii())
