
from langgraph.prebuilt import create_react_agent
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import START, MessagesState, StateGraph
from langchain_community.chat_models.tongyi import ChatTongyi
from langchain_core.messages import HumanMessage
import time
import chainlit as cl
from fastapi import FastAPI
from chainlit.utils import mount_chainlit
from chainlit.types import ThreadDict
from openai import AsyncOpenAI
from mcp import ClientSession
from typing import Dict, Optional
from fastapi import Request, Response
from chainlit.input_widget import Select, Switch, Slider
import pandas as pd
import plotly.graph_objects as go
import json
from langgraph.prebuilt import create_react_agent
from langgraph.checkpoint.memory import InMemorySaver



# Chat model served by Alibaba Cloud Model Studio (DashScope).
# Available models: https://help.aliyun.com/zh/model-studio/getting-started/models
model = ChatTongyi(
    model="qwen-max",  # swap for another DashScope model name as needed
    streaming=True,
    # SECURITY: never hard-code API keys in source. Read the key from the
    # environment; ChatTongyi also falls back to DASHSCOPE_API_KEY itself.
    api_key=os.environ.get("DASHSCOPE_API_KEY"),
)
# In-memory checkpointer: conversation state is lost when the process exits.
checkpointer = InMemorySaver()

def get_weather(city: str) -> str:
    """Return a (mock) weather report for *city*.

    Stub tool exposed to the ReAct agent; always reports sunshine.
    The docstring doubles as the tool description the LLM sees.
    """
    # Removed a leftover debug print that wrote a separator line to stdout
    # on every tool call.
    return f"It's always sunny in {city}!"

# ReAct agent wired to the weather tool; the checkpointer persists
# per-thread conversation history across invocations.
agent = create_react_agent(
    model=model,
    tools=[get_weather],
    prompt="You are a helpful assistant",
    checkpointer=checkpointer  
)
# NOTE(review): a single hard-coded thread_id means every connected user
# shares one conversation history — consider deriving the thread id from
# the Chainlit session instead.
config = {"configurable": {"thread_id": "abc123"}}

@cl.on_message
async def on_message(message: cl.Message):
    """Handle an incoming Chainlit user message.

    Runs the ReAct agent on the user's text and replies with the agent's
    final answer text (not the raw state dict).
    """
    # Scope conversation memory to this Chainlit session rather than the
    # shared module-level thread id, so users don't see each other's history.
    session_config = {"configurable": {"thread_id": cl.context.session.id}}
    # agent.invoke is synchronous; run it in a worker thread via
    # cl.make_async so the event loop is not blocked while the LLM responds.
    output = await cl.make_async(agent.invoke)(
        {"messages": [{"role": "user", "content": message.content}]},
        config=session_config,
    )
    # The agent returns the full graph state; the reply to show the user is
    # the content of the last message in the "messages" channel.
    await cl.Message(content=output["messages"][-1].content).send()



