"""
Author: xubing
Date: 2024-11-21
Description: OpenAI兼容的LLM客户端
"""

from logger.logger_config_db import setup_logger
from typing import Generator, List

from openai import OpenAI

from clients.base_client import BaseClient, ChatResponse, Message

# Set up the module-level logger
logger = setup_logger(__name__)


class LLMClient(BaseClient):
    """OpenAI-compatible LLM client.

    Wraps the ``openai`` SDK's chat-completions API behind the
    :class:`BaseClient` interface, supporting both blocking (``chat``)
    and streaming (``chat_stream``) requests. Errors are reported via
    return values / yielded strings rather than raised, except during
    construction.
    """

    def __init__(self, model_name):
        """Initialize the underlying OpenAI client from the model config.

        Args:
            model_name: Key passed to :class:`BaseClient`; the resulting
                ``self.model_conf`` must provide ``api_key``, ``base_url``
                and ``model_name``.

        Raises:
            Exception: Re-raised (after logging) if the OpenAI client
                cannot be constructed.
        """
        super().__init__(model_name)
        try:
            self.client = OpenAI(
                api_key=self.model_conf["api_key"],
                base_url=self.model_conf["base_url"],
            )
        except Exception as e:
            logger.error(f"Failed to initialize OpenAI client: {e}")
            raise

    def _build_params(self, messages: List[Message], stream: bool) -> dict:
        """Assemble keyword arguments for ``chat.completions.create``.

        Shared by ``chat`` and ``chat_stream`` so the default-parameter
        logic lives in a single place.

        Args:
            messages: Conversation history to send to the model.
            stream: Whether to request a streaming completion.

        Returns:
            A dict of keyword arguments for the completions API.
        """
        params = {
            "model": self.model_conf["model_name"],
            "messages": messages,
            "stream": stream,
            "temperature": self.default_params.get("temperature", 0),
        }
        # max_tokens is optional; only forward it when configured.
        if "max_tokens" in self.default_params:
            params["max_tokens"] = self.default_params["max_tokens"]
        return params

    def chat(self, messages: List[Message]) -> ChatResponse:
        """Send a blocking chat request and return the full response.

        Args:
            messages: Conversation history to send to the model.

        Returns:
            ChatResponse holding the model's reply content. On any
            failure, a ChatResponse with empty content, ``status=False``
            and the error message — this method never raises.
        """
        try:
            params = self._build_params(messages, stream=False)
            completion = self.client.chat.completions.create(**params)
            result = completion.choices[0].message.content
            logger.info(f"{self.model_name} response: {result}")
            return ChatResponse(content=result)
        except Exception as e:
            logger.error(f"Chat error with {self.model_name}: {e}")
            return ChatResponse(content="", status=False, error_msg=str(e))

    def chat_stream(self,
                    messages: List[Message]) -> Generator[str, None, None]:
        """Send a streaming chat request, yielding content deltas.

        Args:
            messages: Conversation history to send to the model.

        Yields:
            Non-empty content fragments as they arrive. On failure, a
            single ``"Error: ..."`` string is yielded instead of raising.
        """
        try:
            params = self._build_params(messages, stream=True)
            completion = self.client.chat.completions.create(**params)
            for chunk in completion:
                # Skip chunks that carry no content (e.g. role-only or
                # final chunks in the stream).
                if chunk.choices[0].delta.content:
                    delta_content = chunk.choices[0].delta.content
                    logger.info(f"{self.model_name} response: {delta_content}")
                    yield delta_content
        except Exception as e:
            logger.error(f"Stream chat error with {self.model_name}: {e}")
            yield f"Error: {str(e)}"
