import os

import agentscope
from agentscope.agents import UserAgent
from agentscope.message.msg import Msg

from agents.vision_agent import VisionAgent
from vllm_models.showui_model import ShowUIModel

# Chat-model configuration registered with AgentScope: an OpenAI-compatible
# chat endpoint served by ModelScope's inference API. Agents refer to it by
# its `config_name`.
_CHAT_MODEL_CONFIG = {
    "model_type": "openai_chat",
    "config_name": "vllm-openai-api",
    "model_name": "Qwen/Qwen2.5-72B-Instruct",
    "client_args": {
        "base_url": "https://api-inference.modelscope.cn/v1",
    },
    # NOTE(review): resolves to None when MODELSCOPE_ACCESS_TOKEN is unset —
    # presumably the API call then fails at request time; confirm.
    "api_key": os.environ.get("MODELSCOPE_ACCESS_TOKEN"),
    "generate_args": {"temperature": 0.2},
    "stream": True,
}

model_configs = [_CHAT_MODEL_CONFIG]
agentscope.init(model_configs=model_configs)

# Grounding/vision backend: a ShowUI model served by an OpenAI-compatible
# endpoint (local vLLM instance by the look of the address).
# NOTE(review): base URL and api_key are hard-coded — presumably a lab
# deployment; consider moving to configuration.
_vision_backend = ShowUIModel(
    model_name="AI-ModelScope/ShowUI-2B",
    base_url="http://10.0.102.69:8000/v1",
    api_key="123456",
)

# Agent combining the registered chat model (reasoning) with the vision
# backend above; memory disabled, so each call is stateless.
show_ui_agent = VisionAgent(
    name="showui-agent",
    model_config_name="vllm-openai-api",
    sys_prompt="You are a helper assistant.",
    vision_model=_vision_backend,
    use_memory=False,
)

user = UserAgent(name="User", input_hint="User Input ('exit' to quit): ")

# Build the request: a text instruction plus a screenshot attachment, asking
# the agent to locate the "My Computer" icon in the image.
vision_msg = Msg(
    name="user",
    content="My Computer icon.",
    role="user",
    # NOTE(review): path is relative to the current working directory —
    # breaks if the script is launched from elsewhere; confirm intent.
    url="../examples/01.PNG",
)

# Run the agent once and dump the reply payload to stdout.
response = show_ui_agent(vision_msg)
print(response.content)
