
import os
from datetime import datetime
from typing import TypedDict, Annotated
import yaml
import asyncio
# langchain的类
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.tools import tool
from langgraph.graph.message import add_messages
from langchain_openai import AzureChatOpenAI
from langgraph.graph import StateGraph
from langgraph.prebuilt import ToolNode, tools_condition
from langgraph.constants import START

from langchain_core.runnables import Runnable, RunnableConfig, RunnableLambda
# 自己的类
from azs_token_info import TokenInfo
from azs_use_token import TokenChatAgent

prompt_config_path = os.path.join(os.path.dirname(__file__), 'prompt_config.yaml')

class Config:
    """Connection settings for the token-info backend service.

    SECURITY NOTE(review): these credentials were hard-coded in source.
    They are now read from environment variables, with the original values
    kept as fallbacks so existing deployments keep working — rotate these
    secrets and remove the fallbacks as soon as possible.
    """
    base_url = os.environ.get("AZS_BASE_URL", "http://dev.azures.cn:28888/")
    username = os.environ.get("AZS_USERNAME", "auto_test")
    password = os.environ.get("AZS_PASSWORD", "Qwer123#@!")
    # Shared token manager, created once at class-definition (import) time.
    tokenManage = TokenInfo(base_url=base_url, username=username, password=password)

class State(TypedDict):
    # Conversation state flowing through the LangGraph graph. The
    # `add_messages` reducer appends new messages to the running list
    # instead of replacing it on each node update.
    messages: Annotated[list, add_messages]


# Azure OpenAI connection/configuration parameters for the chat model.
# SECURITY NOTE(review): the API key was hard-coded in source; it is now
# read from the environment with the original value as a backward-compatible
# fallback — rotate this key and drop the fallback.
params = {
    "azure_endpoint": "https://azs-dev-us-01.openai.azure.com/",
    "openai_api_key": os.environ.get(
        "AZURE_OPENAI_API_KEY", "73cf593826a54526bebd341aa0ca551e"
    ),
    "model_name": "gpt-3.5-turbo",
    "deployment_name": "gpt-4o",  # Azure deployment actually used for requests
    "openai_api_version": "2023-07-01-preview",
    "temperature": 0,  # deterministic output
    "max_tokens": 4096,
    "top_p": 0,
    "frequency_penalty": 0,
    "presence_penalty": 0,
    "streaming": False,
    "request_timeout": 600,  # seconds
    "max_retries": 0,
}


# Tool: report token consumption across all users.
@tool
def get_token_info()->str:
    """Token Info.

    Fetch the aggregate token-consumption report for every user from the
    backend, authenticating with the shared token manager.

    :return: the report string produced by the backend.
    """
    agent = TokenChatAgent(Config.base_url, Config.tokenManage.get_token())
    return agent.use_all_token()
# Tool: report token consumption for a single user.
@tool
def get_person_info(person_name: str) -> str:
    """Person Info.

    Fetch the token-consumption report for one user from the backend.

    :param person_name: name of the user to look up.
    :return: the per-user report string produced by the backend.
    """
    # FIX: removed leftover debug print(person_name); added -> str return
    # annotation for consistency with get_token_info.
    token_chat = TokenChatAgent(Config.base_url, Config.tokenManage.get_token())
    res = token_chat.get_person_token(person_name)
    return res

# Module-level chat model built from the Azure configuration above.
llm = AzureChatOpenAI(**params)
# Tools the agent may call; bound to the model in AgentZxh.agentStart.
tools = [get_token_info,get_person_info]







class AgentZxh:
    """Chat agent whose system prompt is loaded from a YAML config file.

    The agent is a two-node LangGraph graph: an LLM node ("chatbot") and a
    tool-execution node ("tools"), looping until no more tool calls are made.
    """

    def __init__(self, prompt_init: str = prompt_config_path):
        """Load the prompt configuration.

        :param prompt_init: path to a YAML file containing ``system_prompt``.
        """
        with open(prompt_init, "r", encoding="utf-8") as fh:
            self.config = yaml.safe_load(fh)

    def agentStart(self):
        """Build and compile the conversation graph.

        :return: a compiled LangGraph graph ready for ``stream``/``invoke``.
        """
        prompt = ChatPromptTemplate.from_messages(
            [
                ('system', self.config['system_prompt']),
                ("placeholder", "{messages}"),
            ]
        ).partial(time=datetime.now)  # callable: `time` is resolved per call

        assistant = prompt | llm.bind_tools(tools)

        def chatbot(state: State):
            # One LLM step: feed accumulated messages, append the reply.
            return {"messages": assistant.invoke(state)}

        builder = StateGraph(State)
        builder.add_node("tools", ToolNode(tools=tools))
        builder.add_node('chatbot', chatbot)
        builder.add_edge(START, "chatbot")
        # Route to the tool node when the model issued tool calls, else end.
        builder.add_conditional_edges(
            "chatbot",
            tools_condition,
        )
        builder.add_edge("tools", "chatbot")
        return builder.compile()



if __name__ == '__main__':
    zxhAgent = AgentZxh()
    zxhGraph = zxhAgent.agentStart()

    def stream_graph_updates(user_input: str):
        """Stream one user turn through the graph, printing each assistant reply."""
        for event in zxhGraph.stream({"messages": [("user", user_input)]}):
            for value in event.values():
                messages = value['messages']
                # Nodes may emit either a list of messages or a single message.
                if isinstance(messages, list):
                    print("Assistant:", messages[0].content)
                else:
                    print("Assistant:", messages.content)

    while True:
        try:
            user_input = input("User: ")
            if user_input.lower() in ["quit", "exit", "q"]:
                print("Goodbye!")
                break

            # BUG FIX: forward the user's actual input; previously a
            # hard-coded test query was always sent regardless of input.
            stream_graph_updates(user_input)
        except Exception:
            # fallback if input() is not available (e.g. no attached console)
            user_input = "What do you know about LangGraph?"
            print("User: " + user_input)
            # stream_graph_updates(user_input)
            break