import gradio as gr
import asyncio
import os
from minion import config
from minion.main import LocalPythonEnv
from minion.main.rpyc_python_env import RpycPythonEnv
from minion.main.brain import Brain
from minion.providers import create_llm_provider

# Build the brain once at startup so it is not rebuilt on every request
def build_brain():
    model = "gpt-4.1"
    llm_config = config.models.get(model)
    llm = create_llm_provider(llm_config)
    # python_env = RpycPythonEnv(port=3007)
    python_env = LocalPythonEnv(verbose=False)
    brain = Brain(
        python_env=python_env,
        llm=llm,
    )
    return brain

brain = build_brain()

async def minion_respond_async(query):
    obs, score, *_ = await brain.step(query=query, route="python", check=False)
    return obs

def minion_respond(query):
    # Synchronous interface for Gradio: run the async brain call to completion
    return asyncio.run(minion_respond_async(query))

demo = gr.Interface(
    fn=minion_respond,
    inputs="text",
    outputs="text",
    title="Minion Brain Chat",
    description="Intelligent Q&A backed by the Minion1 Brain"
)

if __name__ == "__main__":
    demo.launch(mcp_server=True)
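
# Usage sketch for querying the running app with gradio_client (assumptions:
# the default local port 7860 and the default gr.Interface endpoint "/predict"):
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860/")
#   print(client.predict("What is 2 + 2?", api_name="/predict"))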