import asyncio
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.ui import Console
from autogen_ext.models.openai import OpenAIChatCompletionClient
from autogen_ext.models.openai.config import ModelInfo
from app.env import strEnv, floatEnv, boolEnv


# Capability metadata for the configured model. Every flag is sourced from
# the environment so deployments can swap models without touching code.
_model_info_fields = {
    "family": strEnv("LLM_FAMILY"),
    "function_calling": boolEnv("LLM_SUPPORT_TOOL_CALLING"),
    "json_output": boolEnv("LLM_SUPPORT_JSON_OUTPUT"),
    "vision": boolEnv("LLM_SUPPORT_VISION"),
}
model_info = ModelInfo(**_model_info_fields)

# Build the model client from environment configuration. Any other client
# that implements the `ChatCompletionClient` interface can be substituted.
_client_settings = {
    "model": strEnv("LLM_MODEL"),
    "api_key": strEnv("LLM_API_KEY"),
    "base_url": strEnv("LLM_BASEURL"),
    "temperature": floatEnv("LLM_TEMPERATURE"),
}
model_client = OpenAIChatCompletionClient(model_info=model_info, **_client_settings)
