# !pip install "autogen-agentchat" "autogen-ext[openai]"
import asyncio
import os
from typing import AsyncGenerator, Sequence

from autogen_agentchat.agents import AssistantAgent, BaseChatAgent
from autogen_agentchat.base import Response
from autogen_agentchat.conditions import MaxMessageTermination, TextMentionTermination
from autogen_agentchat.messages import BaseAgentEvent, BaseChatMessage, TextMessage
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.ui import Console
from autogen_core import CancellationToken
from autogen_core.model_context import UnboundedChatCompletionContext
from autogen_core.models import AssistantMessage, RequestUsage, SystemMessage, UserMessage
from autogen_ext.models.openai import OpenAIChatCompletionClient


class GeminiAssistantAgent(BaseChatAgent):
    """A chat agent that streams replies from an OpenAI-compatible model client.

    Keeps the full conversation in an unbounded model context and yields a
    single final ``Response`` built from the streamed model output.
    """

    def __init__(
        self,
        name: str,
        model_client: OpenAIChatCompletionClient,
        description: str = "An agent that provides assistance with ability to use tools.",
        system_message: str
        | None = "You are a helpful assistant that can respond to messages. Reply with TERMINATE when the task has been completed.",
    ):
        """Create the agent.

        Args:
            name: Agent name registered with the team.
            model_client: Client used for all model calls.
            description: Short description shown to other agents.
            system_message: System prompt sent with every model call,
                or ``None`` to send no system prompt.
        """
        super().__init__(name=name, description=description)
        self._model_context = UnboundedChatCompletionContext()
        self._model_client = model_client
        self._system_message = system_message

    @property
    def produced_message_types(self) -> Sequence[type[BaseChatMessage]]:
        """This agent only ever emits plain text messages."""
        return (TextMessage,)

    async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:
        """Drain the stream and return its final ``Response``.

        Raises:
            AssertionError: If the stream ended without yielding a ``Response``.
        """
        final_response = None
        async for message in self.on_messages_stream(messages, cancellation_token):
            if isinstance(message, Response):
                final_response = message

        if final_response is None:
            raise AssertionError("The stream should have returned the final result.")

        return final_response

    async def on_messages_stream(
        self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken
    ) -> AsyncGenerator[BaseAgentEvent | BaseChatMessage | Response, None]:
        """Call the model with the conversation so far and yield the final Response.

        Fixes over the previous revision: the configured system message was
        stored but never sent to the model (it is now prepended to the call),
        and the cancellation token is now forwarded to the model client.
        """
        # Record the incoming messages in the conversation context.
        for msg in messages:
            await self._model_context.add_message(msg.to_model_message())

        # Build the model input: optional system prompt, then full history.
        model_messages = await self._model_context.get_messages()
        if self._system_message is not None:
            model_messages = [SystemMessage(content=self._system_message), *model_messages]

        # Accumulate streamed content. autogen clients yield str chunks and a
        # final CreateResult whose .content holds the complete text; raw str
        # chunks match neither branch below, so only the final result is
        # accumulated and the text is not duplicated. The 'choices' branch
        # tolerates raw OpenAI-style chunk objects as well.
        response_content = ""
        async for chunk in self._model_client.create_stream(model_messages, cancellation_token=cancellation_token):
            if hasattr(chunk, "content"):
                chunk_content = chunk.content
                if isinstance(chunk_content, str):
                    response_content += chunk_content
                else:
                    # Function-call or other structured content — stringify.
                    response_content += str(chunk_content)
            elif hasattr(chunk, "choices") and len(chunk.choices) > 0:
                # OpenAI-style streaming delta format.
                delta = chunk.choices[0].delta
                if hasattr(delta, "content") and delta.content:
                    response_content += delta.content

        # Persist the assistant turn so follow-up calls see it.
        await self._model_context.add_message(AssistantMessage(content=response_content, source=self.name))

        # Yield the single final response.
        yield Response(
            chat_message=TextMessage(content=response_content, source=self.name),
            inner_messages=[],
        )

    async def on_reset(self, cancellation_token: CancellationToken) -> None:
        """Reset the assistant by clearing the model context."""
        await self._model_context.clear()



# Client for an OpenAI-compatible endpoint (DashScope / Qwen).
# NOTE(security): the API key was previously hard-coded in this file — a
# committed secret. It is now read from the environment; export
# DASHSCOPE_API_KEY before running, and rotate the leaked key.
model_client = OpenAIChatCompletionClient(
    model="qwen-vl-max",
    api_key=os.environ.get("DASHSCOPE_API_KEY", ""),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    model_info={
        "vision": True,
        "function_calling": True,
        "json_output": False,
        "family": "unknown",
        "structured_output": True,
        "max_tokens": 8192,
        "top_p": 0.8,
    },
)

# Writer agent: produces the content the critic will review.
primary_agent = AssistantAgent(
    name="primary",
    model_client=model_client,
    system_message="You are a helpful AI assistant.",
)

# Critic agent built on the custom streaming GeminiAssistantAgent.
gemini_critic_agent = GeminiAssistantAgent(
    name="gemini_critic",
    model_client=model_client,
    system_message="Provide constructive feedback. Respond with 'APPROVE' to when your feedbacks are addressed.",
)


async def main() -> None:
    """Run the writer/critic team on a sample task, then close the client."""
    # Stop when the critic says APPROVE, or after 10 messages as a safety cap.
    termination = TextMentionTermination("APPROVE") | MaxMessageTermination(10)

    # Alternate turns between the writer and the critic.
    team = RoundRobinGroupChat([primary_agent, gemini_critic_agent], termination_condition=termination)

    await Console(team.run_stream(task="Write a Haiku poem with 4 lines about the fall season."))
    await model_client.close()


# Guard the entry point so importing this module does not start a model run.
if __name__ == "__main__":
    asyncio.run(main())
