import asyncio
from contextlib import AsyncExitStack
import requests
import json

from langgraph.checkpoint.sqlite.aio import AsyncSqliteSaver
from langgraph.prebuilt import create_react_agent

from langchain_core.tools import tool
from langchain_openai import ChatOpenAI

from pydantic import BaseModel,Field

class WeatherLoc(BaseModel):
    """Argument schema for the ``get_weather`` tool."""

    # City name the user wants weather for (English name for Chinese cities).
    location: str = Field(description="The location name of the city")


@tool(args_schema=WeatherLoc)
def get_weather(location):
    """
    Query the current weather for a city via the OpenWeather API.

    :param location: Required, string — the city name to query. For cities in
        China the corresponding English city name should be used; e.g. to
        query the weather for Beijing, pass 'Beijing'.
    :return: The OpenWeather current-weather result (request URL:
        https://api.openweathermap.org/data/2.5/weather), a parsed
        JSON-formatted object re-serialized as a string, containing all
        important weather information.
    """
    # Step 1. Build the request URL.
    url = "https://api.openweathermap.org/data/2.5/weather"

    # Step 2. Set the query parameters.
    params = {
        "q": location,
        # SECURITY(review): API key hard-coded in source — move to an
        # environment variable / secret store and rotate this key.
        "appid": "5c939a7cc59eb8696f4cd77bf75c5a9a",
        "units": "metric",  # Celsius instead of Fahrenheit
        "lang": "zh_cn"  # responses in Simplified Chinese
    }

    # Step 3. Send the GET request. A timeout keeps the agent from hanging
    # forever when the API is slow or unreachable (the original had none).
    response = requests.get(url, params=params, timeout=10)

    # Step 4. Parse and re-serialize the response. ensure_ascii=False keeps
    # the zh_cn descriptions as readable Chinese instead of \uXXXX escapes.
    data = response.json()
    return json.dumps(data, ensure_ascii=False)

# Tool list exposed to the ReAct agent.
tools = [get_weather]

# SECURITY NOTE(review): credentials hard-coded in source — load these from
# environment variables instead and rotate both values.
api_key = "sk-6S0PtpNia71gjcfwSsDPsJ9mGqsVPr2XRQzAx1dHbJS7RW4t"
# OpenAI-compatible proxy endpoint used as the chat completions base URL.
api_base="https://chatapi.littlewheat.com/v1"

llm = ChatOpenAI(model="gpt-4o",api_key=api_key,base_url=api_base)

# Module-level exit stack; the coroutines below enter the sqlite
# checkpointer context into it to keep the connection open while streaming.
stack = AsyncExitStack()


async def mem():
    """Run a checkpointed ReAct agent for two turns on one thread.

    Streams two user turns through the agent with an in-memory sqlite
    checkpointer. Both turns share thread_id "33", so the second question
    ("what did I just ask you?") is answered from the checkpointed history.
    """
    # Fix: the original called stack.aclose() as the last statement, which
    # is skipped if either stream raises — leaking the sqlite connection.
    # `async with` guarantees the checkpointer is closed on every path.
    async with AsyncSqliteSaver.from_conn_string(":memory:") as memory:
        graph = create_react_agent(llm, tools=tools, checkpointer=memory)

        config = {"configurable": {"thread_id": "33"}}

        # Turn 1: should trigger the get_weather tool.
        async for chunk in graph.astream({"messages": ["帮我查一下北京的天气"]}, config, stream_mode="values"):
            chunk["messages"][-1].pretty_print()

        # Turn 2: relies on the checkpointed history from turn 1.
        async for chunk in graph.astream({"messages": ["我刚才问了你什么问题"]}, config, stream_mode="values"):
            chunk["messages"][-1].pretty_print()

asyncio.run(mem())

async def mem_event():
    """Stream token-level events from the agent (astream_events v2).

    Prints each chat-model token as it arrives, separated by "|".
    """
    # Fix: the original entered the checkpointer into the module-level
    # stack but never closed it, leaking the sqlite connection. `async
    # with` scopes the connection to this coroutine instead.
    async with AsyncSqliteSaver.from_conn_string(":memory:") as memory:
        graph = create_react_agent(llm, tools=tools, checkpointer=memory)

        config = {"configurable": {"thread_id": "33"}}

        async for event in graph.astream_events({"messages": ["请你非常详细的介绍一下你自己"]}, config, version="v2"):
            kind = event["event"]
            # Only chat-model token chunks carry printable content;
            # tool/graph events are skipped.
            if kind == "on_chat_model_stream":
                content = event["data"]["chunk"].content
                if content:
                    print(content, end="|")

asyncio.run(mem_event())


