from qwen_agent.agents import Assistant
from qwen_agent.gui import WebUI
from qwen_agent.llm import get_chat_model

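# Configuration for the DashScope-hosted qwen-max model. Calling DashScope
# requires the DASHSCOPE_API_KEY environment variable to be set.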
llm_cfg = {
    'model': 'qwen-max',
    'model_server': 'dashscope',
    'generate_cfg': {'top_p': 0.8}
}

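# Build the chat model client from the config.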
llm = get_chat_model(llm_cfg)

messages = [
    {'role': 'user', 'content': 'Who are you?'}
]

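# With stream=True, llm.chat() yields the cumulative response so far on each
# iteration, so `responses` holds the complete final reply after the loop.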
responses = []
for responses in llm.chat(messages=messages, stream=True):
    print(responses)
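
# Append the reply to the message history to continue the conversation.
messages.extend(responses)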


# Wrap the same LLM config in an Assistant agent for the Gradio-based web UI.
# Note that WebUI exposes run() rather than Gradio's launch(); server_name and
# server_port are forwarded to the underlying Gradio app.
assistant = Assistant(llm=llm_cfg)
WebUI(assistant).run(server_name='0.0.0.0', server_port=7860)