"""大模型客户端：封装大模型调用逻辑，支持后续切换模型（如OpenAI、智谱等）"""
from openai import AsyncOpenAI
from typing import List, Dict, Optional
from config.settings import settings
from config.constants import ROLE_SYSTEM,ROLE_ASSISTANT
from core.agent_client import get_agent_response  # 导入智能体响应函数

# Global async LLM client (SiliconFlow-hosted, OpenAI-compatible API).
# Created once at import time; endpoint and key come from settings.
llm_client = AsyncOpenAI(
    api_key=settings.LLM_API_KEY,
    base_url=settings.LLM_BASE_URL
)

async def get_llm_stream_response(
    user_query: str,
    history: List[Dict[str, str]],
    sys_prompt: str = ROLE_SYSTEM,
    model: str = settings.LLM_DEFAULT_MODEL,
    temperature: float = settings.DEFAULT_TEMPERATURE,
    top_p: float = settings.DEFAULT_TOP_P,
    max_token: Optional[int] = settings.DEFAULT_MAX_TOKEN
):
    """
    Request a streaming chat completion from the LLM.

    :param user_query: the user's current question
    :param history: prior conversation turns as {"role": ..., "content": ...} dicts
    :param sys_prompt: system prompt text.
        NOTE(review): the default is the ROLE_SYSTEM constant, which is also
        used below as the *role name* — confirm it is really meant to double
        as default prompt content.
    :param model: model identifier to call
    :param temperature: sampling temperature
    :param top_p: nucleus-sampling probability
    :param max_token: maximum number of tokens to generate
    :return: the streaming response object from the OpenAI-compatible API
    """
    # Assemble the message list in one expression: system prompt first,
    # then history turns (entries missing a role or content are dropped),
    # then the new user turn.
    messages = [
        {"role": ROLE_SYSTEM, "content": sys_prompt},
        *(turn for turn in history if turn.get("role") and turn.get("content")),
        {"role": "user", "content": user_query},
    ]

    # Issue the request with streaming enabled and hand the stream back.
    return await llm_client.chat.completions.create(
        model=model,
        messages=messages,
        temperature=temperature,
        top_p=top_p,
        max_tokens=max_token,
        stream=True,
    )

async def get_llm_response(
    user_query: str,
    history: List[Dict[str, str]],
    sys_prompt: str = ROLE_SYSTEM,
    temperature: float = settings.DEFAULT_TEMPERATURE,
    top_p: float = settings.DEFAULT_TOP_P,
    max_token: Optional[int] = settings.DEFAULT_MAX_TOKEN
):
    """
    Get a non-streaming response via the agent (replaces the former direct
    LLM call), wrapped in an OpenAI-completion-shaped object so existing
    callers that read ``response.choices[0].message.content`` keep working.

    :param user_query: the user's current question
    :param history: prior conversation turns as {"role": ..., "content": ...} dicts
    :param sys_prompt: kept for interface compatibility; NOT forwarded to the agent
    :param temperature: kept for interface compatibility; NOT forwarded to the agent
    :param top_p: kept for interface compatibility; NOT forwarded to the agent
    :param max_token: kept for interface compatibility; NOT forwarded to the agent
    :return: object with ``.choices[0].message.content`` set to the agent's reply text
    """
    # Local stdlib import keeps the module's import block untouched.
    from types import SimpleNamespace

    # NOTE(review): get_agent_response is called without ``await`` inside an
    # async function — assumed to be synchronous; confirm it is not a coroutine.
    response_text = get_agent_response(
        query=user_query,
        history=history
    )

    # Build the compatibility shim with SimpleNamespace instead of defining
    # three nested throwaway classes on every call (the original re-created
    # MockLLMResponse/ChoicesItem/Message per invocation).
    return SimpleNamespace(
        choices=[SimpleNamespace(message=SimpleNamespace(content=response_text))]
    )