"""
LangChain Invoker Implementation
"""
from typing import List, Optional, AsyncIterator, Any

from langchain_core.messages import (
    BaseMessage, SystemMessage, HumanMessage, AIMessage, ToolMessage, AIMessageChunk
)
from langchain_core.tools import BaseTool
from langchain_core.exceptions import LangChainException

from axiom_boot.core.exceptions import FrameworkException, AIException
from axiom_boot.ai.interfaces import (
    ILLMInvoker, AxiomMessage, AxiomResponse, ToolCall, UsageMetadata
)
from axiom_boot.di.decorators import service
from axiom_boot.di.dependency import autowired
from axiom_boot.ai.impl.langchain.llm_manager_impl import LLMClientManager


@service()
class LangchainInvoker(ILLMInvoker):
    """
    LangChain implementation of the ILLMInvoker interface.

    Relies on LLMClientManager to resolve concrete client instances and
    implements the bidirectional conversion between the Axiom data model
    and the LangChain data model.
    """

    def __init__(self, llm_client_manager: LLMClientManager = autowired()):
        # DI-injected manager that maps a client name to a LangChain chat model.
        self.llm_client_manager = llm_client_manager

    def _to_langchain_messages(self, messages: List[AxiomMessage]) -> List[BaseMessage]:
        """Convert a list of AxiomMessage objects into LangChain BaseMessages.

        Messages with an unrecognized role are silently skipped (existing
        behavior, preserved).
        """
        lc_messages: List[BaseMessage] = []
        for msg in messages:
            if msg.role == "system":
                lc_messages.append(SystemMessage(content=msg.content))
            elif msg.role == "user":
                # content is forwarded as-is so multimodal payloads
                # (e.g. lists of content parts) pass through unchanged.
                lc_messages.append(HumanMessage(content=msg.content))
            elif msg.role == "assistant":
                tool_calls = []
                if msg.tool_calls:
                    # LangChain's AIMessage expects tool_calls as plain dicts.
                    tool_calls = [tc.model_dump() for tc in msg.tool_calls]
                # AIMessage.content must be a str, so coerce defensively.
                lc_messages.append(AIMessage(content=str(msg.content), tool_calls=tool_calls))
            elif msg.role == "tool":
                lc_messages.append(ToolMessage(content=str(msg.content), tool_call_id=msg.tool_call_id))
            # NOTE(review): any other role is dropped without error; consider
            # raising FrameworkException here if that is ever unexpected.
        return lc_messages

    def _from_langchain_message(self, lc_message: BaseMessage, turn: int) -> AxiomMessage:
        """Convert a LangChain BaseMessage back into an AxiomMessage.

        Args:
            lc_message: The LangChain message (AIMessage, AIMessageChunk, ...).
            turn: Conversation turn number to stamp onto the result.
        """
        # Normalize LangChain's message "type" into Axiom role names.
        # Fix: "human" previously leaked through unchanged even though the
        # Axiom role vocabulary uses "user" (see _to_langchain_messages).
        if isinstance(lc_message, (AIMessage, AIMessageChunk)):
            role = "assistant"
        elif lc_message.type == "human":
            role = "user"
        else:
            role = lc_message.type

        tool_calls = None
        tool_call_id = None

        if getattr(lc_message, 'tool_calls', None):
            tool_calls = [
                ToolCall(id=tc["id"], name=tc["name"], args=tc["args"])
                for tc in lc_message.tool_calls
            ]

        if getattr(lc_message, 'tool_call_id', None):
            tool_call_id = lc_message.tool_call_id

        return AxiomMessage(
            role=role,
            content=lc_message.content,
            turn=turn,
            tool_calls=tool_calls,
            tool_call_id=tool_call_id
        )

    async def ainvoke(
        self,
        client_name: str,
        messages: List[AxiomMessage],
        tools: Optional[List[BaseTool]] = None,
        **kwargs: Any
    ) -> AxiomResponse:
        """Invoke an LLM in non-streaming mode.

        Args:
            client_name: Name of the client registered in LLMClientManager.
            messages: Conversation history in the Axiom message model.
            tools: Optional LangChain tools to bind to the model.
            **kwargs: Forwarded verbatim to the underlying client.

        Raises:
            AIException: If the underlying LangChain call fails.
        """
        client = self.llm_client_manager.get_client(client_name)
        lc_messages = self._to_langchain_messages(messages)

        if tools:
            client = client.bind_tools(tools)

        try:
            response_lc = await client.ainvoke(lc_messages, **kwargs)
        except LangChainException as e:
            # Translate the generic LangChain exception into a domain exception.
            # NOTE(review): provider SDK errors may not subclass
            # LangChainException and would propagate unwrapped — confirm.
            raise AIException(f"AI 服务调用失败: {e}") from e

        # The response turn is taken from the last input message (simplified).
        last_turn = messages[-1].turn if messages else 1
        axiom_message = self._from_langchain_message(response_lc, turn=last_turn)

        # Parse token usage. Prefer the provider-specific token_usage block;
        # fall back to LangChain's standardized usage_metadata field, which
        # some providers populate instead (fix: usage was silently lost there).
        usage = None
        metadata = response_lc.response_metadata or {}
        if 'token_usage' in metadata:
            token_usage_data = metadata['token_usage']
            usage = UsageMetadata(
                prompt_tokens=token_usage_data.get('prompt_tokens', 0),
                completion_tokens=token_usage_data.get('completion_tokens', 0),
                total_tokens=token_usage_data.get('total_tokens', 0),
            )
        elif getattr(response_lc, 'usage_metadata', None):
            um = response_lc.usage_metadata
            usage = UsageMetadata(
                prompt_tokens=um.get('input_tokens', 0),
                completion_tokens=um.get('output_tokens', 0),
                total_tokens=um.get('total_tokens', 0),
            )

        return AxiomResponse(message=axiom_message, usage=usage)

    async def astream(
        self,
        client_name: str,
        messages: List[AxiomMessage],
        tools: Optional[List[BaseTool]] = None,
        **kwargs: Any
    ) -> AsyncIterator[AxiomMessage]:
        """Invoke an LLM in streaming mode, yielding AxiomMessage chunks.

        Args:
            client_name: Name of the client registered in LLMClientManager.
            messages: Conversation history in the Axiom message model.
            tools: Optional LangChain tools to bind to the model.
            **kwargs: Forwarded verbatim to the underlying client.

        Yields:
            AxiomMessage chunks with role "assistant". Tool-call chunks carry
            partial data (args may be an incomplete JSON fragment).

        Raises:
            AIException: If the underlying LangChain call fails.
        """
        client = self.llm_client_manager.get_client(client_name)
        lc_messages = self._to_langchain_messages(messages)

        if tools:
            client = client.bind_tools(tools)

        last_turn = messages[-1].turn if messages else 1

        try:
            async for chunk in client.astream(lc_messages, **kwargs):
                # Convert each AIMessageChunk into an AxiomMessage chunk.
                # getattr guards against chunk types without tool_call_chunks.
                tool_calls = None
                chunk_tool_calls = getattr(chunk, 'tool_call_chunks', None)
                if chunk_tool_calls:
                    tool_calls = [
                        ToolCall(
                            id=tc.get("id"),
                            name=tc.get("name"),
                            args=tc.get("args")
                        )
                        for tc in chunk_tool_calls
                    ]

                yield AxiomMessage(
                    role="assistant",
                    content=chunk.content,
                    turn=last_turn,
                    tool_calls=tool_calls
                )
        except LangChainException as e:
            # Translate the generic LangChain exception into a domain exception.
            # NOTE(review): see ainvoke — provider SDK errors may bypass this.
            raise AIException(f"AI 服务调用失败: {e}") from e