from llama_index.core.memory.chat_memory_buffer import ChatMemoryBuffer
import asyncio
from llama_index.core.agent.workflow import ReActAgent
from llama_index.core.workflow import Context
from llama_index.llms.langchain import LangChainLLM
from llama_index.llms.dashscope import DashScope
from langchain_community.chat_models import ChatTongyi
import os
def get_weather(city:str) -> str:
    """Mock weather-lookup tool exposed to the ReAct agent.

    Args:
        city: Name of the city to look up.

    Returns:
        A fixed Chinese-language report stating that *city* is sunny.
    """
    # Trace the tool invocation so agent tool calls are visible on stdout.
    progress_line = f">>>正在获取{city}的天气..."
    print(progress_line)
    report = f"城市：{city},天气是晴天"
    return report

async def main():
    """Run a two-turn weather conversation against a ReAct agent.

    Wraps the Tongyi ``qwen-plus`` chat model (via LangChain) for
    llama-index, builds a ReActAgent with the ``get_weather`` tool, and asks
    two questions. Both turns share the same ``Context`` and memory buffer so
    the follow-up ("天津呢") can be resolved against the first turn.

    Requires the ``DASHSCOPE_API_KEY`` environment variable to be set.

    Returns:
        The agent's response object for the final turn.
    """
    llm = LangChainLLM(ChatTongyi(
        model="qwen-plus",
        api_key=os.getenv("DASHSCOPE_API_KEY"),
    ))

    agent = ReActAgent(llm=llm, tools=[get_weather])
    # The Context carries conversation state across the two .run() calls.
    ctx = Context(agent)
    # Fixed misspelled local name ("mempry" -> "memory"). The bounded buffer
    # keeps chat history under the model's context window.
    memory = ChatMemoryBuffer.from_defaults(token_limit=4000)

    resp = await agent.run("北京天气怎么样", ctx=ctx, memory=memory)
    print(resp.response)
    resp = await agent.run("天津呢", ctx=ctx, memory=memory)
    print(resp.response)
    return resp

# Entry point: run the async demo conversation.
if __name__ == "__main__":
    asyncio.run(main())