# llm_api.py
# import os
# import asyncio
# from openai import AsyncOpenAI
# from config import DEEPSEEK_API_KEY, DEEPSEEK_API_URL, REPLY_CONFIG, DOUBAO_API_KEY, DOUBAO_API_URL
# from core.logger import log
#
#
# class LLMClient:
#     def __init__(self, model_name: str, api_key: str, api_url: str):
#         """
#         初始化 LLM 客户端
#         :param model_name: 模型名称，如 'deepseek-chat', 'doubao-1-5-pro-256k-250115'
#         :param api_key: API Key
#         :param api_url: API 地址（支持 OpenAI 兼容接口）
#         """
#         self.model_name = model_name
#         self.api_key = api_key
#         self.api_url = api_url
#
#         # 初始化 AsyncOpenAI 客户端
#         self.client = AsyncOpenAI(
#             base_url=self.api_url,
#             api_key=self.api_key
#         )
#
#         # 读取 prompt
#         cur_dir = os.path.dirname(os.path.abspath(__file__))
#         prompt_file_path = os.path.join(cur_dir, "..", "deep_prompt.txt")
#         with open(prompt_file_path, "r", encoding="utf-8") as f:
#             self.deep_prompt = f.read()
#
#     async def get_reply(self, message: str, description_message: str, sender: str = "用户", retry: int = 3):
#         """
#         获取单条消息回复
#         """
#         is_valid, result = self._check_msg(message)
#         if not is_valid:
#             return result
#
#         params = self._build_params(message, description_message, sender)
#         return await self._fetch_reply(params, retry)
#
#     async def get_reply_with_history(self, all_messages, description_message: str, sender: str = "用户", retry: int = 3):
#         """
#         获取带历史记录的回复
#         """
#         is_valid, result = self._check_msg(all_messages[-1]["content"])
#         if not is_valid:
#             return result
#
#         params = self._build_history_params(all_messages, description_message, sender)
#         return await self._fetch_reply(params, retry)
#
#     def _build_params(self, message: str, description_message: str, sender: str) -> dict:
#         """
#         构建单条消息的请求参数
#         """
#         prompt = REPLY_CONFIG["prompt_template"].format(
#             ms=description_message,
#             message=message,
#             sender=sender
#         )
#         return {
#             "model": self.model_name,
#             "messages": [
#                 {"role": "system", "content": self.deep_prompt},
#                 {"role": "user", "content": prompt}
#             ],
#             "temperature": REPLY_CONFIG["temperature"],
#             "max_tokens": REPLY_CONFIG["max_tokens"]
#         }
#
#     def _build_history_params(self, all_messages, description_message: str, sender: str = "用户") -> dict:
#         """
#         构建带历史记录的请求参数
#         """
#         messages = [{"role": "system", "content": self.deep_prompt}]
#
#         for msg in all_messages:
#             role = "user" if msg["type"] == "接收" else "assistant"
#             messages.append({"role": role, "content": msg["content"]})
#
#         prompt = REPLY_CONFIG["prompt_template"].format(
#             ms=description_message,
#             message=messages[-1]["content"],
#             sender=sender
#         )
#         messages[-1]["content"] = "目前是多轮对话，不需要再加称呼，直接按照提示词要求进行回复  " + prompt
#
#         return {
#             "model": self.model_name,
#             "messages": messages,
#             "temperature": REPLY_CONFIG["temperature"],
#             "max_tokens": REPLY_CONFIG["max_tokens"]
#         }
#
#     async def _fetch_reply(self, params: dict, retry: int = 3):
#         """
#         异步调用 LLM 并返回结果
#         """
#         for attempt in range(retry):
#             try:
#                 completion = await self.client.chat.completions.create(**params)
#                 reply = completion.choices[0].message.content
#
#                 if REPLY_CONFIG.get("reply_prefix"):
#                     reply = REPLY_CONFIG["reply_prefix"] + reply
#
#                 return reply
#
#             except Exception as e:
#                 print(f"API请求异常 (尝试 {attempt + 1}/{retry}): {str(e)}")
#                 await asyncio.sleep(1)
#
#         return "生成回复失败，请稍后再试"
#
#     def _check_msg(self, message: str):
#         """
#         校验单条消息是否合法
#         """
#         if not message or not message.strip():
#             return False, "消息内容为空，无法生成回复"
#
#         for keyword in REPLY_CONFIG["blacklist"]:
#             if keyword in message:
#                 log.info(message)
#                 return False, f"消息包含黑名单关键词 '{keyword}'，不生成回复"
#         return True, ""
#
#     # def _check_all_msg(self, all_messages):
#     #     """
#     #     校验多条消息是否合法
#     #     """
#     #     if not all_messages:
#     #         return False, "消息内容为空，无法生成回复"
#     #
#     #     for message in all_messages:
#     #         if message["type"] == "接收":
#     #             for keyword in REPLY_CONFIG["blacklist"]:
#     #                 if keyword in message["content"]:
#     #                     log.info(message["content"])
#     #                     return False, f"消息包含黑名单关键词 '{keyword}'，不生成回复"
#     #     return True, ""
#
#
#
#
# class DoubaoClient(LLMClient):
#     def __init__(self):
#         super().__init__(
#             model_name="doubao-1-5-pro-256k-250115",
#             api_key=DOUBAO_API_KEY,
#             api_url=DOUBAO_API_URL)
#
# class DeepSeekClient(LLMClient):
#     def __init__(self):
#         super().__init__("deepseek-chat",
#                          DEEPSEEK_API_KEY,
#                          DEEPSEEK_API_URL)
