from typing import AsyncGenerator, Sequence, Union, Optional

from autogen_agentchat.agents import BaseChatAgent
from autogen_agentchat.base import Response
from autogen_agentchat.messages import AgentEvent, ChatMessage, TextMessage, BaseChatMessage
from autogen_core import CancellationToken, Component
from autogen_core.model_context import UnboundedChatCompletionContext
from autogen_core.models import UserMessage, RequestUsage, AssistantMessage
from openai import AsyncOpenAI
from pydantic import BaseModel
from typing_extensions import Self

from config.yaml_settings import get_llm_model_config, get_llm_api_config, LLMModelConfig, LLMAPIConfig


class OpenAiAssistantAgentConfig(BaseModel):
    """Serializable configuration schema for ``OpenaiAssistantAgent``.

    Used by the AutoGen ``Component`` machinery (``_from_config`` /
    ``_to_config``) to dump and load the agent declaratively.
    """

    # Agent name; becomes the ``source`` of the messages it emits.
    name: str
    # Human-readable description of the agent.
    description: str = "An agent that provides assistance with ability to use tools."
    # Model identifier sent to the chat-completions endpoint.
    model: str = "doubao-seed-1-6-flash-250715"
    # Optional system prompt prepended to every request; None means no system message.
    system_message: Optional[str] = None
    # OpenAI-compatible API endpoint (defaults to the Volcengine Ark gateway).
    base_url: str = "https://ark.cn-beijing.volces.com/api/v3/"
    # API key for the endpoint; note ``_to_config`` masks it as "***" on export.
    api_key: str


class OpenaiAssistantAgent(BaseChatAgent, Component[OpenAiAssistantAgentConfig]):
    """A chat agent backed by an OpenAI-compatible chat-completions API.

    The full conversation is accumulated in an
    :class:`UnboundedChatCompletionContext` and replayed as the ``messages``
    payload on every request, so the remote model always sees the whole
    history. Responses are returned as plain :class:`TextMessage` objects.
    """

    component_config_schema = OpenAiAssistantAgentConfig

    def __init__(
            self,
            name: str,
            description: str = "An agent that provides assistance with ability to use tools.",
            model: Optional[str] = None,
            api_key: Optional[str] = None,
            base_url: Optional[str] = None,
            system_message: Optional[str] = "You are a helpful assistant that can respond to messages. Reply with TERMINATE when the task has been completed.",
            model_config: Optional[LLMModelConfig] = None,
            api_config: Optional[LLMAPIConfig] = None,
            temperature: float = 0.3,
    ):
        """Create the agent.

        Args:
            name: Unique agent name; used as the ``source`` of emitted messages.
            description: Human-readable description of the agent.
            model: Model identifier; falls back to ``model_config.model_name``.
            api_key: API key; falls back to ``api_config.api_key``.
            base_url: Endpoint URL; falls back to ``api_config.base_url``.
            system_message: System prompt prepended to every request; pass
                ``None`` (or ``""``) to send no system message.
            model_config: Project model configuration; loaded from the YAML
                settings when not supplied.
            api_config: Project API configuration; loaded from the YAML
                settings when not supplied.
            temperature: Sampling temperature for the completions call
                (previously hard-coded to 0.3; same default).
        """
        super().__init__(name=name, description=description)

        # Fall back to the project-level YAML settings when no explicit
        # configuration objects are provided.
        if model_config is None:
            model_config = get_llm_model_config()
        if api_config is None:
            api_config = get_llm_api_config()

        # Explicit constructor arguments take precedence over config-file values.
        self._base_url = base_url or api_config.base_url
        self._system_message = system_message
        self._model = model or model_config.model_name
        self._temperature = temperature
        self._model_context = UnboundedChatCompletionContext()

        # Async OpenAI-compatible client (e.g. Volcengine Ark endpoint).
        self._model_client = AsyncOpenAI(
            base_url=self._base_url,
            api_key=api_key or api_config.api_key,
        )

    @property
    def produced_message_types(self) -> Sequence[type[ChatMessage]]:
        """This agent only ever produces plain text messages."""
        return (TextMessage,)

    async def on_messages(self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken) -> Response:
        """Handle incoming messages by draining the stream variant.

        Delegates to :meth:`on_messages_stream` and returns its final
        :class:`Response`.

        Raises:
            AssertionError: If the stream finished without yielding a Response.
        """
        final_response = None
        async for message in self.on_messages_stream(messages, cancellation_token):
            if isinstance(message, Response):
                final_response = message

        if final_response is None:
            raise AssertionError("The stream should have returned the final result.")
        return final_response

    async def on_messages_stream(
            self, messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken
    ) -> AsyncGenerator[Union[AgentEvent, ChatMessage, Response], None]:
        """Append *messages* to the context, query the model, yield one Response.

        Note: ``cancellation_token`` is currently not propagated to the API
        call; a long-running completion cannot be cancelled mid-flight.
        """
        # Record the incoming messages in the shared conversation context.
        for msg in messages:
            await self._model_context.add_message(
                UserMessage(content=msg.content, source=msg.source)
            )

        history = await self._model_context.get_messages()

        # Rebuild the OpenAI-style chat payload from the stored context.
        chat_history = []
        if self._system_message:
            chat_history.append({"role": "system", "content": self._system_message})

        for msg in history:
            role = "user" if isinstance(msg, UserMessage) else "assistant"
            chat_history.append({"role": role, "content": msg.content})

        # Send the request to the OpenAI-compatible endpoint (Volcengine Ark).
        response = await self._model_client.chat.completions.create(
            model=self._model,
            messages=chat_history,
            temperature=self._temperature,
        )

        # ``content`` may legitimately be None (e.g. refusal / tool-call
        # responses); coerce to "" so downstream message types stay valid.
        reply = response.choices[0].message.content or ""
        usage = response.usage

        # Update the context with the assistant's reply.
        await self._model_context.add_message(
            AssistantMessage(content=reply, source=self.name)
        )

        # ``usage`` can be None on some OpenAI-compatible backends; only
        # attach token accounting when the server actually reported it.
        models_usage = (
            RequestUsage(
                prompt_tokens=usage.prompt_tokens,
                completion_tokens=usage.completion_tokens,
            )
            if usage is not None
            else None
        )

        yield Response(
            chat_message=TextMessage(
                content=reply,
                source=self.name,
                models_usage=models_usage,
            ),
            inner_messages=[],
        )

    async def on_reset(self, cancellation_token: CancellationToken) -> None:
        """Forget the entire conversation history."""
        await self._model_context.clear()

    @classmethod
    def _from_config(cls, config: OpenAiAssistantAgentConfig) -> Self:
        """Build an agent instance from its serialized configuration."""
        return cls(
            name=config.name,
            description=config.description,
            model=config.model,
            api_key=config.api_key,
            base_url=config.base_url,
            system_message=config.system_message,
        )

    def _to_config(self) -> OpenAiAssistantAgentConfig:
        """Serialize this agent's configuration, masking the API key."""
        return OpenAiAssistantAgentConfig(
            name=self.name,
            description=self.description,
            model=self._model,
            system_message=self._system_message,
            base_url=self._base_url,
            api_key="***",  # never export the real secret
        )