from typing import List, Dict, Optional
from llmadapter.llmadapter import Adapter
from common.log import log_algo, log_method_io
from conf.config import settings
from pydantic import BaseModel
class LLMClient:
    """Middleware for large language model calls; provides a unified invocation interface.

    Wraps the project-local ``Adapter`` and holds a single configured LLM handle
    built from ``settings.LLM_CONFIG``.
    """

    def __init__(self):
        """Initialize the client: create the adapter and the configured LLM handle."""
        self.adapter = Adapter()
        self.llm = self.adapter.get_llm(
            model=settings.LLM_CONFIG["MODEL"],
            api_key=settings.LLM_CONFIG["API_KEY"],
            base_url=settings.LLM_CONFIG["BASE_URL"],
        )
        # Concurrency limit read from config; presumably an int that callers use
        # to build a semaphore — TODO confirm against call sites.
        self.semaphore = settings.LLM_CONFIG["SEMAPHORE"]

    def llm_request_json_chat(self, messages: str, output_basemodel: type[BaseModel], **kwargs) -> str:
        """Synchronously call the model and have the adapter parse the reply as JSON.

        Args:
            messages: Chat content forwarded verbatim to the adapter.
                NOTE(review): annotated ``str`` but previously documented as a
                message list — confirm the expected type against ``Adapter.chat_as_json``.
            output_basemodel: Pydantic model class describing the expected JSON shape.
            **kwargs: Extra options forwarded to ``Adapter.chat_as_json``.

        Returns:
            str: The model's reply as returned by the adapter.
        """
        return self.adapter.chat_as_json(llm=self.llm, messages=messages, output_basemodel=output_basemodel, **kwargs)

    def retry_chat_as_json(self, content_messages: str, system_messages: str, fix_messages: str, output_basemodel: type[BaseModel], max_retry: int = 3, **kwargs) -> Optional[Dict]:
        """Call the model expecting JSON, delegating retry-on-parse-failure to the adapter.

        Args:
            content_messages: User/content prompt forwarded to the adapter.
            system_messages: System prompt forwarded to the adapter.
            fix_messages: Prompt the adapter uses to ask the model to fix invalid JSON.
            output_basemodel: Pydantic model class describing the expected JSON shape.
            max_retry: Maximum number of retry attempts (default 3).
            **kwargs: Extra options forwarded to ``Adapter.retry_chat_as_json``.

        Returns:
            Optional[Dict]: Parsed JSON result, or ``None`` as returned by the adapter.
        """
        return self.adapter.retry_chat_as_json(
            llm=self.llm,
            content_messages=content_messages,
            system_messages=system_messages,
            fix_messages=fix_messages,
            output_basemodel=output_basemodel,
            max_retry=max_retry,
            **kwargs
        )
# Create the global client instance.
# NOTE: this runs at import time (constructs Adapter and fetches the LLM handle),
# so importing this module has side effects.
llm_client = LLMClient()
