import asyncio
from typing import Literal

from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.messages import TextMessage
from autogen_agentchat.ui import Console
from autogen_ext.models.openai import OpenAIChatCompletionClient
from autogen_ext.models.openai.config import ModelInfo
from pydantic import BaseModel

from app.env import boolEnv, floatEnv, strEnv


# The response format for the agent as a Pydantic base model.
class AgentResponse(BaseModel):
    """Structured output schema the model must emit.

    Passed to the client as ``response_format`` so every completion is a
    JSON object matching these fields.
    """

    # Free-form reasoning produced before the final label.
    thoughts: str
    # The sentiment label; constrained to exactly these three values.
    response: Literal["happy", "sad", "neutral"]


# Create a chat-completion client configured from environment variables
# (model name, key, base URL, capabilities) with the custom response format.
# Capability flags for the configured model, all sourced from the environment.
_capabilities = ModelInfo(
    family=strEnv("LLM_FAMILY"),
    function_calling=boolEnv("LLM_SUPPORT_TOOL_CALLING"),
    json_output=boolEnv("LLM_SUPPORT_JSON_OUTPUT"),
    vision=boolEnv("LLM_SUPPORT_VISION"),
)

# OpenAI-compatible client; AgentResponse is enforced as the structured
# output schema for every completion.
model_client = OpenAIChatCompletionClient(
    model=strEnv("LLM_MODEL"),
    api_key=strEnv("LLM_API_KEY"),
    base_url=strEnv("LLM_BASEURL"),
    temperature=floatEnv("LLM_TEMPERATURE"),
    model_info=_capabilities,
    response_format=AgentResponse,  # type: ignore
)
# Sentiment-labelling assistant; the system prompt steers the model toward
# the JSON schema that the client's response_format enforces.
agent = AssistantAgent(
    name="assistant",
    model_client=model_client,
    system_message=(
        "Categorize the input as happy, sad, or neutral following the JSON format."
    ),
)


async def main():
    """Run the agent on a sample task, streaming its output to the console."""
    await Console(agent.run_stream(task="I am happy."))


# Fix: `main` was defined but never invoked, so executing this file did
# nothing. The guard keeps the module import-safe (e.g. for tests) while
# making it runnable as a script.
if __name__ == "__main__":
    asyncio.run(main())