# +----------------------------------------------------------------------
# | ChatWork智能聊天办公系统
# +----------------------------------------------------------------------
# | 软件声明: 本系统并非自由软件,未经授权任何形式的商业使用均属非法。
# | 版权保护: 任何企业和个人不允许对程序代码以任何形式任何目的复制/分发。
# | 授权要求: 如有商业使用需求,请务必先与版权所有者取得联系并获得正式授权。
# +----------------------------------------------------------------------
# | Author: ChatWork Team <2474369941@qq.com>
# +----------------------------------------------------------------------
import json
from openai import AsyncOpenAI


class OpenaiChannel:
    """
    OpenAI chat channel.

    API reference:
    https://platform.openai.com/docs/api-reference/responses/create?lang=python
    """

    def __init__(self, config: dict = None, apikey: dict = None):
        """Initialize from an optional config dict and API-key dict."""
        apikey = apikey or {}
        self.base_url: str = "https://api.openai.com/v1"
        self.apis_key: str = apikey.get("api_key")
        self.n = 1                     # number of completions to generate
        self.top_p = None              # nucleus sampling: [0, 1.0]
        self.temperature = None        # sampling temperature: [0, 2.0]
        self.presence_penalty = None   # presence penalty: [-2.0, 2.0]
        self.frequency_penalty = None  # frequency penalty: [-2.0, 2.0]
        self.max_tokens = None         # max output tokens
        self.think = False             # deep-thinking (reasoning) mode
        self.model = ""                # chat model name
        self.extra = {}                # extra request-body parameters

        if not config:
            return

        self.n = int(config.get("n", 1))
        self.top_p = config.get("top_p")
        self.temperature = config.get("temperature")
        self.presence_penalty = config.get("presence_penalty")
        self.frequency_penalty = config.get("frequency_penalty")
        self.max_tokens = config.get("max_tokens")
        self.think = bool(config.get("think", False))
        self.model = str(config.get("model", ""))

        proxy = config.get("agency_api")
        if proxy:
            # Strip trailing whitespace, then any trailing slash.
            self.base_url = proxy.rstrip().rstrip("/")
        if self.think:
            self.extra["reasoning"] = {
                "effort": "high",
                "summary": "auto"
            }
        raw_extra = config.get("extra")
        if raw_extra:
            # "extra" arrives as a JSON string of additional body fields.
            self.extra.update(json.loads(raw_extra))

    def llm(self, messages: list, tools: list = None, stream=True):
        """Issue a chat-completion request; returns the SDK coroutine/stream."""
        client = AsyncOpenAI(
            api_key=self.apis_key,
            base_url=self.base_url
        )

        return client.chat.completions.create(
            model=self.model,
            messages=messages,
            stream=stream,
            n=self.n,
            tools=tools,
            top_p=self.top_p,
            max_tokens=self.max_tokens,
            temperature=self.temperature,
            presence_penalty=self.presence_penalty,
            frequency_penalty=self.frequency_penalty,
            extra_body=self.extra or None
        )


class SparkChannel:
    """
    iFlytek Spark channel.

    API reference: https://www.xfyun.cn/doc/spark/X1http.html
    """
    def __init__(self, config: dict = None, apikey: dict = None):
        """Initialize from an optional config dict and API-key dict."""
        apikey = apikey if apikey else {}
        self.base_url: str = "https://spark-api-open.xf-yun.com/v2"
        self.apis_key: str = apikey.get("api_key")
        self.top_k = None               # top-k sampling: [1, 6]
        self.top_p = None               # nucleus sampling: [0.0, 1.0]
        self.temperature = None         # sampling temperature: [0.0, 2.0]
        self.presence_penalty = None    # topic presence penalty: [0.0, 3.0]
        self.frequency_penalty = None   # repetition penalty: [0.0, 1.0]
        self.max_tokens = 0             # max output tokens (0 means "not set")
        self.model = ""                 # chat model name
        self.extra = {}                 # extra request-body parameters

        if config:
            self.top_k = config.get("top_k")
            self.top_p = config.get("top_p")
            self.temperature = config.get("temperature")
            self.presence_penalty = config.get("presence_penalty")
            self.frequency_penalty = config.get("frequency_penalty")
            self.max_tokens = config.get("max_tokens")
            self.model = str(config.get("model", ""))

            if config.get("agency_api"):
                # Strip trailing whitespace, then any trailing slash.
                self.base_url = config.get("agency_api").rstrip().rstrip("/")
            if self.top_k is not None:
                # top_k is not a standard OpenAI parameter; send it in the body.
                self.extra["top_k"] = self.top_k
            if config.get("extra"):
                # "extra" arrives as a JSON string of additional body fields.
                self.extra.update(json.loads(config.get("extra")))

    def llm(self, messages: list, tools: list = None, stream=True):
        """Issue a chat-completion request; returns the SDK coroutine/stream."""
        client = AsyncOpenAI(
            base_url=self.base_url,
            api_key=self.apis_key
        )

        return client.chat.completions.create(
            stream=stream,
            model=self.model,
            messages=messages,
            top_p=self.top_p,
            temperature=self.temperature,
            presence_penalty=self.presence_penalty,
            frequency_penalty=self.frequency_penalty,
            # Fix: max_tokens was parsed from config but never sent.
            # 0/None both mean "not configured" and map to None (omitted).
            max_tokens=self.max_tokens or None,
            tools=tools,
            extra_body=self.extra if self.extra else None
        )


class ZhipuChannel:
    """
    Zhipu (BigModel) channel.

    API reference: https://docs.bigmodel.cn/cn/guide/start/concept-param
    """
    def __init__(self, config: dict = None, apikey: dict = None):
        """Initialize from an optional config dict and API-key dict."""
        apikey = apikey if apikey else {}
        self.base_url: str = "https://open.bigmodel.cn/api/paas/v4"
        self.apis_key: str = apikey.get("api_key")
        self.top_p = None        # nucleus sampling: [0.0, 1.0]
        self.temperature = None  # sampling temperature: [0.0, 1.0]
        self.do_sample = None    # sampling strategy: [True, False]
        self.max_tokens = None   # max output tokens
        self.think = False       # deep-thinking mode
        self.model = ""          # chat model name
        self.extra = {}          # extra request-body parameters

        if config:
            self.temperature = config.get("temperature")
            self.max_tokens = config.get("max_tokens")
            self.top_p = config.get("top_p")
            # Defaults to True (enabled) when the key is absent from config.
            self.do_sample = config.get("do_sample", True)
            self.think = bool(config.get("think", False))
            self.model = str(config.get("model", ""))

            if config.get("agency_api"):
                # Strip trailing whitespace, then any trailing slash.
                self.base_url = config.get("agency_api").rstrip().rstrip("/")

            # "thinking" is a Zhipu-specific body parameter.
            self.extra["thinking"] = {"type": "disabled"}
            if self.think:
                self.extra["thinking"]["type"] = "enabled"
            if self.do_sample is not None:
                self.extra["do_sample"] = self.do_sample
            raw_extra = config.get("extra")
            if raw_extra:
                # Fix: the sibling channels decode "extra" from a JSON string;
                # here a string was passed straight to dict.update(), which
                # raises ValueError. Accept a JSON string or an existing dict.
                if isinstance(raw_extra, str):
                    raw_extra = json.loads(raw_extra)
                self.extra.update(raw_extra)

    def llm(self, messages: list, tools: list = None, stream=True):
        """Issue a chat-completion request; returns the SDK coroutine/stream."""
        client = AsyncOpenAI(
            base_url=self.base_url,
            api_key=self.apis_key
        )

        return client.chat.completions.create(
            stream=stream,
            model=self.model,
            messages=messages,
            temperature=self.temperature,
            max_tokens=self.max_tokens,
            top_p=self.top_p,
            tools=tools,
            extra_body=self.extra if self.extra else None
        )


class BaiduChannel:
    """
    Baidu Qianfan channel.

    API reference: https://cloud.baidu.com/doc/qianfan-api/s/3m7of64lb
    """
    def __init__(self, config: dict = None, apikey: dict = None):
        """Initialize from an optional config dict and API-key dict."""
        apikey = apikey if apikey else {}
        self.base_url: str = "https://qianfan.baidubce.com/v2"
        self.app_id: str = apikey.get("app_id")
        self.api_key: str = apikey.get("api_key")
        self.top_p = None               # nucleus sampling: [0.1, 1.0]
        self.temperature = None         # sampling temperature: [0.0, 2.0]
        self.frequency_penalty = None   # reduces verbatim repetition: [-2, 2.0]
        self.presence_penalty = None    # encourages new topics: [-2, 2.0]
        self.repetition_penalty = None  # reduces generated repetition: [0, 2.0]
        self.max_tokens = None          # max output tokens
        self.think = False              # deep-thinking mode
        self.model = ""                 # chat model name
        self.extra = {}                 # extra request-body parameters

        if config:
            self.top_p = config.get("top_p")
            self.temperature = config.get("temperature")
            self.frequency_penalty = config.get("frequency_penalty")
            self.presence_penalty = config.get("presence_penalty")
            self.repetition_penalty = config.get("repetition_penalty")
            self.max_tokens = config.get("max_tokens")
            self.think = bool(config.get("think", False))
            self.model = str(config.get("model", ""))
            # Fix: only include repetition_penalty when actually configured;
            # the old code always sent {"repetition_penalty": null}.
            self.extra = {}
            if self.repetition_penalty is not None:
                self.extra["repetition_penalty"] = self.repetition_penalty

            if config.get("agency_api"):
                # Strip trailing whitespace, then any trailing slash.
                self.base_url = config.get("agency_api").rstrip().rstrip("/")
            if self.think:
                self.extra["enable_thinking"] = True
            if config.get("extra"):
                # "extra" arrives as a JSON string of additional body fields.
                self.extra.update(json.loads(config.get("extra")))

    def llm(self, messages: list, tools: list = None, stream=True):
        """Issue a chat-completion request; returns the SDK coroutine/stream."""
        client = AsyncOpenAI(
            base_url=self.base_url,
            api_key=self.api_key,
            # Qianfan identifies the application via this header.
            default_headers={"appid": self.app_id}
        )

        return client.chat.completions.create(
            stream=stream,
            model=self.model,
            messages=messages,
            top_p=self.top_p,
            temperature=self.temperature,
            frequency_penalty=self.frequency_penalty,
            presence_penalty=self.presence_penalty,
            # Fix: max_tokens was parsed from config but never sent.
            max_tokens=self.max_tokens,
            tools=tools,
            extra_body=self.extra if self.extra else None
        )


class QwenChannel:
    """
    Alibaba Tongyi Qianwen channel.

    API reference: https://help.aliyun.com/zh/model-studio
    """
    def __init__(self, config: dict = None, apikey: dict = None):
        """Initialize from an optional config dict and API-key dict."""
        apikey = apikey if apikey else {}
        self.base_url: str = "https://dashscope.aliyuncs.com/compatible-mode/v1"
        self.apis_key: str = apikey.get("api_key")
        self.top_p = None               # nucleus sampling: [0.0, 1.0]
        self.top_k = None               # top-k sampling: [1, 6]
        self.max_tokens = None          # max output tokens
        self.temperature = None         # sampling temperature: [0, 1.0]
        self.presence_penalty = None    # presence penalty: [-2.0, 2.0]
        self.repetition_penalty = None  # repetition penalty: [0, 2.0]
        self.model = ""                 # chat model name
        self.extra = {}                 # extra request-body parameters

        if config:
            self.top_p = config.get("top_p")
            self.top_k = config.get("top_k")
            self.max_tokens = config.get("max_tokens")
            self.temperature = config.get("temperature")
            self.presence_penalty = config.get("presence_penalty")
            self.repetition_penalty = config.get("repetition_penalty")
            self.model = str(config.get("model", ""))
            # Fix: only include DashScope-specific body fields when configured;
            # the old code always sent {"top_k": null, "repetition_penalty": null}.
            self.extra = {}
            if self.top_k is not None:
                self.extra["top_k"] = self.top_k
            if self.repetition_penalty is not None:
                self.extra["repetition_penalty"] = self.repetition_penalty

            if config.get("agency_api"):
                # Strip trailing whitespace, then any trailing slash.
                self.base_url = config.get("agency_api").rstrip().rstrip("/")
            if config.get("extra"):
                # "extra" arrives as a JSON string of additional body fields.
                self.extra.update(json.loads(config.get("extra")))

    def llm(self, messages: list, tools: list = None, stream=True):
        """Issue a chat-completion request; returns the SDK coroutine/stream."""
        client = AsyncOpenAI(
            base_url=self.base_url,
            api_key=self.apis_key
        )

        return client.chat.completions.create(
            stream=stream,
            model=self.model,
            messages=messages,
            temperature=self.temperature,
            top_p=self.top_p,
            # Bug fix: the original inverted ternary
            # `None if self.max_tokens else self.max_tokens` always yielded
            # a null value, so the configured limit was never applied.
            max_tokens=self.max_tokens,
            presence_penalty=self.presence_penalty,
            tools=tools,
            extra_body=self.extra if self.extra else None
        )


class BaichuanChannel:
    """
    Baichuan channel.

    API reference: https://platform.baichuan-ai.com/
    """
    def __init__(self, config: dict = None, apikey: dict = None):
        """Initialize from an optional config dict and API-key dict."""
        apikey = apikey if apikey else {}
        self.base_url: str = "https://api.baichuan-ai.com/v1"
        self.apis_key: str = apikey.get("api_key")
        # Fix: declare n in the no-config path too; it was only assigned when
        # config was given, leaving the attribute missing otherwise.
        self.n = 1                 # number of completions to generate
        self.max_tokens = None     # max output tokens
        self.temperature = None    # sampling temperature: [0, 1.0]
        self.top_p = None          # nucleus sampling: [0, 1.0]
        self.top_k = None          # top-k sampling: [0, 20]
        self.model = ""            # chat model name
        self.extra = {}            # extra request-body parameters

        if config:
            self.n = int(config.get("n", 1))
            self.max_tokens = config.get("max_tokens")
            self.temperature = config.get("temperature")
            self.top_p = config.get("top_p")
            self.top_k = config.get("top_k")
            self.model = str(config.get("model", ""))
            # Fix: only include top_k when configured; the old code always
            # sent {"top_k": null}.
            self.extra = {}
            if self.top_k is not None:
                self.extra["top_k"] = self.top_k

            if config.get("agency_api"):
                # Strip trailing whitespace, then any trailing slash.
                self.base_url = config.get("agency_api").rstrip().rstrip("/")
            if config.get("extra"):
                # "extra" arrives as a JSON string of additional body fields.
                self.extra.update(json.loads(config.get("extra")))

    def llm(self, messages: list, tools: list = None, stream=True):
        """Issue a chat-completion request; returns the SDK coroutine/stream."""
        client = AsyncOpenAI(
            base_url=self.base_url,
            api_key=self.apis_key
        )

        return client.chat.completions.create(
            stream=stream,
            model=self.model,
            messages=messages,
            temperature=self.temperature,
            top_p=self.top_p,
            # Fix: max_tokens was parsed from config but never sent.
            max_tokens=self.max_tokens,
            tools=tools,
            extra_body=self.extra if self.extra else None
        )
