# Standard library
import asyncio
import os
from io import BytesIO
from typing import Literal

# Third-party
import PIL
import requests
from pydantic import BaseModel

# AutoGen
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.messages import MultiModalMessage
from autogen_agentchat.ui import Console
from autogen_core import Image
from autogen_core import Image as AGImage
from autogen_core.models import UserMessage
from autogen_ext.models.openai import OpenAIChatCompletionClient

class AgentResponse(BaseModel):
    """Structured output schema the agent is asked to emit: free-form
    reasoning plus one of three category labels."""

    # Free-form reasoning the model produces before picking a category.
    thoughts: str
    # NOTE(review): "god" is almost certainly a typo for "good", but it
    # matches the system_message used at agent construction, so fixing it
    # requires changing both places together — confirm intent before editing.
    response: Literal["god", "bad", "just so so"]

# Define a model client. You can use any other model client that implements
# the `ChatCompletionClient` interface.
#
# SECURITY: an API key was previously hard-coded here and committed to
# source — that key must be considered leaked and rotated. Read the key
# from the environment instead (export DASHSCOPE_API_KEY before running).
model_client = OpenAIChatCompletionClient(
    model="qwen-vl-max",
    # Fails fast with a KeyError if the variable is not set.
    api_key=os.environ["DASHSCOPE_API_KEY"],
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    model_info={
        # Capability flags for a model autogen does not know about.
        "vision": True,
        "function_calling": True,
        "json_output": False,
        "family": "unknown",
        "structured_output": True,
        # NOTE(review): "max_tokens" and "top_p" look like generation
        # parameters, not ModelInfo capability fields — confirm the client
        # actually honors them when passed here.
        "max_tokens": 8192,
        "top_p": 0.8,
    },
)


# A stub weather lookup registered as an agent tool; a real implementation
# would query a weather service.
async def get_weather(city: str) -> str:
    """Return a canned weather report for *city* (demo tool, no real lookup)."""
    report = f"The weather in {city} is 73 degrees and Sunny."
    return report


# Define an AssistantAgent with the model, tool, system message, and reflection enabled.
# The system message instructs the agent via natural language.
agent = AssistantAgent(
    name="weather_agent",
    model_client=model_client,
    tools=[get_weather],
    # NOTE(review): "god" looks like a typo for "good"; it matches the
    # AgentResponse Literal, so both must change together — confirm intent.
    system_message="Categorize the input as god, bad, just so so following the JSON format",
    reflect_on_tool_use=False,
    model_client_stream=True,  # Enable streaming tokens from the model client.
    output_content_type=AgentResponse,  # Validate replies against the AgentResponse schema.
)


# Run the agent on a sample task and stream the messages to the console.
async def main() -> None:
    """Execute one agent run, then release the model-client connection."""
    run = agent.run(task="北京什么天气?")
    await Console(run)
    # Close the connection to the model client.
    await model_client.close()


# NOTE: if running this inside a Python script you'll need to use asyncio.run(main()).
# asyncio.run(main())

async def test_model():
    """Smoke-test the model client with a single plain-text completion.

    Prints the raw result, then closes the client. Note: closing tears
    down the shared `model_client`, so any later use (e.g. `main()`)
    would fail after this runs.
    """
    result = await model_client.create(
        [UserMessage(content="How is the weather in Beijing?", source="user")]
    )
    print(result)
    await model_client.close()


# Guard the entry point: the previous bare `asyncio.run(test_model())`
# fired a network call as a side effect of merely importing this module.
if __name__ == "__main__":
    asyncio.run(test_model())

async def test_model_with_multi_modal():
    """Fetch a random image over HTTP and ask the agent to describe it."""
    # Bug fix: `import PIL` alone does not load the Image submodule, so
    # `PIL.Image.open` raised AttributeError. Import the submodule explicitly.
    from PIL import Image as PILImage

    # Always set a timeout on network calls and fail loudly on HTTP errors.
    resp = requests.get("https://picsum.photos/300/200", timeout=30)
    resp.raise_for_status()
    pil_image = PILImage.open(BytesIO(resp.content))
    # AGImage is the autogen_core Image wrapper (aliased at the top of the
    # file) — distinct from PIL's Image class.
    img = AGImage(pil_image)
    multi_modal_message = MultiModalMessage(
        content=["Can you describe the content of this image?", img],
        source="user",
    )
    await Console(agent.run_stream(task=multi_modal_message))

#asyncio.run(test_model_with_multi_modal())
