'''
Author: xubing xubing613@foxmail.com
Date: 2024-11-21 21:46:21
LastEditTime: 2025-04-21 21:58:24
FilePath: /myUtils/myclients/openai_client.py
Description: OpenAI-compatible LLM client
'''

from typing import Any, Dict, Generator, List, Optional, Union

from pydantic import BaseModel
from openai import OpenAI

from myUtils.logger import setup_logger

logger = setup_logger()


class Message(BaseModel):
    """A single chat message in the OpenAI chat-completions format."""
    # Sender role string — presumably "system" / "user" / "assistant";
    # not validated here, passed through to the API as-is.
    role: str
    # Either plain text, or a list of structured content parts
    # (e.g. multimodal blocks) as dicts in the OpenAI content format.
    content: Union[str, List[Dict[str, Any]]]


class LLMResponse(BaseModel):
    """Uniform result wrapper returned by OpenAILLMClient.chat."""
    # Model-generated text; empty string when the request failed.
    content: str
    # True on success; False when the API call raised an exception.
    status: bool = True
    # str(exception) when status is False, otherwise None.
    error_msg: Optional[str] = None


class OpenAILLMClient():
    """LLM client for any OpenAI-compatible chat-completions endpoint.

    Thin wrapper over the ``openai`` SDK: callers supply message lists and
    get back either an :class:`LLMResponse` (blocking) or a generator of
    text deltas (streaming). Errors are logged and reported, never raised
    from the chat methods.
    """

    def __init__(self,
                 api_key: str,
                 base_url: str,
                 model_name: str,
                 max_tokens: Optional[int] = None,
                 temperature: float = 0) -> None:
        """Initialize the underlying OpenAI SDK client.

        Args:
            api_key: API key for the endpoint.
            base_url: Base URL of the OpenAI-compatible service.
            model_name: Model identifier sent with every request.
            max_tokens: Optional cap on completion length; omitted from
                requests when ``None`` (the previous hard-coded behavior).
            temperature: Sampling temperature; defaults to 0 as before.

        Raises:
            Exception: Re-raised from the SDK if client construction fails
                (logged first).
        """
        self.api_key = api_key
        self.base_url = base_url
        self.model_name = model_name
        self.max_tokens = max_tokens
        self.temperature = temperature
        try:
            self.client = OpenAI(
                api_key=self.api_key,
                base_url=self.base_url,
            )
        except Exception as e:
            logger.error(f"Failed to initialize OpenAI client: {e}")
            raise

    def _build_params(self, messages: List[Message],
                      stream: bool) -> Dict[str, Any]:
        """Assemble keyword arguments for ``chat.completions.create``.

        Shared by :meth:`chat` and :meth:`chat_stream` so the two request
        paths cannot drift apart.
        """
        params: Dict[str, Any] = {
            "model": self.model_name,
            "messages": messages,
            "stream": stream,
            "temperature": self.temperature,
        }
        # Only include max_tokens when explicitly configured, keeping the
        # request identical to the old hard-coded version by default.
        if self.max_tokens is not None:
            params["max_tokens"] = self.max_tokens
        return params

    def chat(self, messages: List[Message]) -> LLMResponse:
        """Run a blocking (non-streaming) chat completion.

        Args:
            messages: Conversation history in OpenAI message format.

        Returns:
            LLMResponse with ``status=True`` and the model text on success,
            or ``status=False`` with ``error_msg`` set on any failure.
        """
        try:
            params = self._build_params(messages, stream=False)
            completion = self.client.chat.completions.create(**params)
            # content may be None (e.g. empty/tool-call responses); coerce
            # to "" so the str-typed LLMResponse.content validates.
            result = completion.choices[0].message.content or ""
            logger.info(result)
            return LLMResponse(content=result)
        except Exception as e:
            logger.error(f"Chat error with {self.model_name}: {e}")
            return LLMResponse(content="", status=False, error_msg=str(e))

    def chat_stream(self,
                    messages: List[Message]) -> Generator[str, None, None]:
        """Stream a chat completion, yielding text deltas as they arrive.

        Args:
            messages: Conversation history in OpenAI message format.

        Yields:
            Non-empty content fragments; on failure a single
            ``"Error: ..."`` string is yielded instead of raising.
        """
        try:
            params = self._build_params(messages, stream=True)
            completion = self.client.chat.completions.create(**params)
            for chunk in completion:
                # Skip keep-alive / role-only chunks with no text delta.
                if chunk.choices[0].delta.content:
                    yield chunk.choices[0].delta.content
        except Exception as e:
            logger.error(f"Stream chat error with {self.model_name}: {e}")
            yield f"Error: {str(e)}"
