# +----------------------------------------------------------------------
# | ChatWork智能聊天办公系统
# +----------------------------------------------------------------------
# | 软件声明: 本系统并非自由软件,未经授权任何形式的商业使用均属非法。
# | 版权保护: 任何企业和个人不允许对程序代码以任何形式任何目的复制/分发。
# | 授权要求: 如有商业使用需求,请务必先与版权所有者取得联系并获得正式授权。
# +----------------------------------------------------------------------
# | Author: ChatWork Team <2474369941@qq.com>
# +----------------------------------------------------------------------
from openai import AsyncOpenAI


class OpenaiChannel:
    """
    OpenAI chat channel: https://platform.openai.com

    Builds an ``AsyncOpenAI`` client from a ``config`` dict (sampling
    parameters, model, optional relay endpoint) and an ``apikey`` dict
    carrying the credential.
    """
    def __init__(self, config: dict = None, apikey: dict = None):
        apikey = apikey if apikey else {}
        self.base_url: str = "https://api.openai.com/v1"
        self.apis_key: str = apikey.get("api_key")
        self.n = 1                    # number of completions to generate
        self.max_tokens = 0           # max output tokens; 0 means "use server default"
        self.temperature = 1.0        # sampling temperature: [0, 2]
        self.presence_penalty = 0     # presence penalty: [-2.0, 2.0]
        self.frequency_penalty = 0    # frequency penalty: [-2.0, 2.0]
        self.model = ""               # chat model name

        if config:
            self.n = int(config.get("n", 1))
            self.max_tokens = int(config.get("max_tokens", 0))
            self.temperature = float(config.get("temperature", 1.0))
            self.presence_penalty = float(config.get("presence_penalty", 0))
            self.frequency_penalty = float(config.get("frequency_penalty", 0))
            self.model = str(config.get("model", ""))

            # Optional relay/proxy endpoint; drop trailing whitespace and "/".
            if config.get("agency_api"):
                self.base_url = config.get("agency_api").rstrip().rstrip("/")

    def llm(self, messages: list, tools: list = None, stream=True):
        """Return the chat-completion coroutine (awaited by the caller)."""
        client = AsyncOpenAI(
            base_url=self.base_url,
            api_key=self.apis_key
        )

        # Fix: max_tokens was parsed from config but never sent to the API.
        # Include it only when explicitly configured (> 0) so an
        # unconfigured request stays identical to before.
        extra_args = {}
        if self.max_tokens:
            extra_args["max_tokens"] = self.max_tokens

        return client.chat.completions.create(
            stream=stream,
            model=self.model,
            messages=messages,
            n=self.n,
            tools=tools,
            temperature=self.temperature,
            presence_penalty=self.presence_penalty,
            frequency_penalty=self.frequency_penalty,
            **extra_args
        )


class BaiduChannel:
    """
    Baidu Qianfan chat channel:
    https://cloud.baidu.com/doc/WENXINWORKSHOP/s/2m3fihw8s

    Uses the OpenAI-compatible v2 endpoint; the app id travels in an
    ``appid`` request header.
    """
    def __init__(self, config: dict = None, apikey: dict = None):
        apikey = apikey if apikey else {}
        self.base_url: str = "https://qianfan.baidubce.com/v2"
        self.app_id: str = apikey.get("app_id")
        self.api_key: str = apikey.get("api_key")
        self.max_tokens = 0        # max output tokens; 0 means "use server default"
        self.top_p = 0.7           # nucleus sampling: [0.1, 1.0]
        self.temperature = 1.0     # sampling temperature: [0, 2]
        self.penalty_score = 1.0   # repetition penalty: [1.0, 2.0]
        self.model = ""            # chat model name

        if config:
            self.max_tokens = int(config.get("max_tokens", 0))
            self.temperature = float(config.get("temperature", 1.0))
            # Fix: this branch used to read presence_penalty/frequency_penalty
            # (copy-pasted from the OpenAI channel, never used here) while
            # top_p and penalty_score — the values llm() actually sends —
            # stayed frozen at their defaults. Read the real parameters.
            self.top_p = float(config.get("top_p", 0.7))
            self.penalty_score = float(config.get("penalty_score", 1.0))
            self.model = str(config.get("model", ""))

            # Optional relay/proxy endpoint; drop trailing whitespace and "/".
            if config.get("agency_api"):
                self.base_url = config.get("agency_api").rstrip().rstrip("/")

    def llm(self, messages: list, tools: list = None, stream=True):
        """Return the chat-completion coroutine against the Qianfan endpoint."""
        client = AsyncOpenAI(
            base_url=self.base_url,
            api_key=self.api_key,
            default_headers={"appid": self.app_id}
        )

        # Include max_tokens only when explicitly configured (> 0).
        extra_args = {}
        if self.max_tokens:
            extra_args["max_tokens"] = self.max_tokens

        return client.chat.completions.create(
            stream=stream,
            model=self.model,
            messages=messages,
            temperature=self.temperature,
            top_p=self.top_p,
            tools=tools,
            extra_body={
                "penalty_score": self.penalty_score
            },
            **extra_args
        )


class ZhipuChannel:
    """
    Zhipu GLM chat channel: https://open.bigmodel.cn

    Uses Zhipu's OpenAI-compatible v4 endpoint; ``do_sample`` rides in
    ``extra_body`` because it is a vendor extension.
    """
    def __init__(self, config: dict = None, apikey: dict = None):
        apikey = apikey if apikey else {}
        self.base_url: str = "https://open.bigmodel.cn/api/paas/v4"
        self.apis_key: str = apikey.get("api_key")
        self.top_p = 0.7         # nucleus sampling: [0.0, 1.0]
        self.max_tokens = 0      # max output tokens; 0 means "use server default"
        self.temperature = 0.95  # sampling temperature: [0, 1.0]
        self.do_sample = False   # sampling strategy toggle
        self.model = ""          # chat model name

        if config:
            self.max_tokens = int(config.get("max_tokens", 0))
            self.temperature = float(config.get("temperature", 0.95))
            # Fix: top_p is sent by llm() but was never read from config
            # (the branch read an unused "n" instead).
            self.top_p = float(config.get("top_p", 0.7))
            # NOTE(review): bool("false") is True — if configs arrive as
            # strings (like the numeric fields above), this needs real
            # parsing; kept as-is to preserve existing behavior.
            self.do_sample = bool(config.get("do_sample", False))
            self.model = str(config.get("model", ""))

            # Optional relay/proxy endpoint; drop trailing whitespace and "/".
            if config.get("agency_api"):
                self.base_url = config.get("agency_api").rstrip().rstrip("/")

    def llm(self, messages: list, tools: list = None, stream=True):
        """Return the chat-completion coroutine against the Zhipu endpoint."""
        client = AsyncOpenAI(
            base_url=self.base_url,
            api_key=self.apis_key
        )

        # Include max_tokens only when explicitly configured (> 0).
        extra_args = {}
        if self.max_tokens:
            extra_args["max_tokens"] = self.max_tokens

        return client.chat.completions.create(
            stream=stream,
            model=self.model,
            messages=messages,
            temperature=self.temperature,
            top_p=self.top_p,
            tools=tools,
            extra_body={
                "do_sample": self.do_sample
            },
            **extra_args
        )


class SparkChannel:
    """
    iFlytek Spark chat channel:
    https://www.xfyun.cn/doc/spark/HTTP%E8%B0%83%E7%94%A8%E6%96%87%E6%A1%A3.htm

    Uses Spark's OpenAI-compatible endpoint; ``top_k`` rides in
    ``extra_body`` because it is a vendor extension.
    """
    def __init__(self, config: dict = None, apikey: dict = None):
        apikey = apikey if apikey else {}
        self.base_url: str = "https://spark-api-open.xf-yun.com/v1"
        self.apis_key: str = apikey.get("api_key")
        self.top_p = 1.0            # nucleus sampling: [0.0, 1.0]
        self.top_k = 4              # candidate pool size: [1, 6]
        self.max_tokens = 0         # max output tokens; 0 means "use server default"
        self.temperature = 0.95     # sampling temperature: [0, 1.0]
        self.presence_penalty = 0   # presence penalty: [-2.0, 2.0]
        self.frequency_penalty = 0  # frequency penalty: [-2.0, 2.0]
        self.model = ""             # chat model name

        if config:
            self.top_p = float(config.get("top_p", 1.0))
            self.top_k = int(config.get("top_k", 4))
            self.max_tokens = int(config.get("max_tokens", 0))
            self.temperature = float(config.get("temperature", 0.95))
            self.presence_penalty = float(config.get("presence_penalty", 0))
            self.frequency_penalty = float(config.get("frequency_penalty", 0))
            self.model = str(config.get("model", ""))

            # Optional relay/proxy endpoint; drop trailing whitespace and "/".
            if config.get("agency_api"):
                self.base_url = config.get("agency_api").rstrip().rstrip("/")

    def llm(self, messages: list, tools: list = None, stream=True):
        """Return the chat-completion coroutine against the Spark endpoint."""
        client = AsyncOpenAI(
            base_url=self.base_url,
            api_key=self.apis_key
        )

        # Fix: max_tokens was parsed from config but never sent to the API.
        # Include it only when explicitly configured (> 0).
        extra_args = {}
        if self.max_tokens:
            extra_args["max_tokens"] = self.max_tokens

        return client.chat.completions.create(
            stream=stream,
            model=self.model,
            messages=messages,
            temperature=self.temperature,
            top_p=self.top_p,
            presence_penalty=self.presence_penalty,
            frequency_penalty=self.frequency_penalty,
            tools=tools,
            extra_body={
                "top_k": self.top_k
            },
            **extra_args
        )


class QwenChannel:
    """
    Alibaba Qwen chat channel: https://help.aliyun.com/zh/model-studio

    Uses DashScope's OpenAI-compatible mode; ``top_k`` and
    ``repetition_penalty`` ride in ``extra_body`` as vendor extensions.
    """
    def __init__(self, config: dict = None, apikey: dict = None):
        apikey = apikey if apikey else {}
        self.base_url: str = "https://dashscope.aliyuncs.com/compatible-mode/v1"
        self.apis_key: str = apikey.get("api_key")
        self.top_p = 0.8               # nucleus sampling: [0.0, 1.0]
        self.top_k = 20                # candidate pool size
        self.max_tokens = 0            # max output tokens; 0 means "use server default"
        self.temperature = 0.7         # sampling temperature: [0, 1.0]
        self.presence_penalty = 0      # presence penalty: [-2.0, 2.0]
        self.repetition_penalty = 1.0  # repetition penalty: [0, 2.0]
        self.model = ""                # chat model name

        if config:
            # Fix: fallback was 1.0, disagreeing with the 0.8 default above —
            # a config without "top_p" silently changed the sampling profile.
            self.top_p = float(config.get("top_p", 0.8))
            self.top_k = int(config.get("top_k", 20))
            self.max_tokens = int(config.get("max_tokens", 0))
            self.temperature = float(config.get("temperature", 0.7))
            self.presence_penalty = float(config.get("presence_penalty", 0))
            self.repetition_penalty = float(config.get("repetition_penalty", 1.0))
            self.model = str(config.get("model", ""))

            # Optional relay/proxy endpoint; drop trailing whitespace and "/".
            if config.get("agency_api"):
                self.base_url = config.get("agency_api").rstrip().rstrip("/")

    def llm(self, messages: list, tools: list = None, stream=True):
        """Return the chat-completion coroutine against the DashScope endpoint."""
        client = AsyncOpenAI(
            base_url=self.base_url,
            api_key=self.apis_key
        )

        return client.chat.completions.create(
            stream=stream,
            model=self.model,
            messages=messages,
            temperature=self.temperature,
            top_p=self.top_p,
            # Fix: the old expression was inverted
            # ("None if self.max_tokens else self.max_tokens"): it sent 0 when
            # unconfigured and dropped the limit when one WAS configured.
            max_tokens=self.max_tokens or None,
            presence_penalty=self.presence_penalty,
            tools=tools,
            extra_body={
                "top_k": self.top_k,
                "repetition_penalty": self.repetition_penalty,
            }
        )


class BaichuanChannel:
    """
    Baichuan chat channel: https://platform.baichuan-ai.com/

    Uses Baichuan's OpenAI-compatible endpoint; ``top_k`` rides in
    ``extra_body`` because it is a vendor extension.
    """
    def __init__(self, config: dict = None, apikey: dict = None):
        apikey = apikey if apikey else {}
        self.base_url: str = "https://api.baichuan-ai.com/v1"
        self.apis_key: str = apikey.get("api_key")
        self.max_tokens = 0           # max output tokens; 0 means "use server default"
        self.temperature = 0.3        # sampling temperature: [0, 1.0]
        self.top_p = 0.85             # nucleus sampling: [0, 1.0]
        self.top_k = 5                # candidate pool size: [0, 20]
        self.model = ""               # chat model name

        if config:
            self.max_tokens = int(config.get("max_tokens", 0))
            self.temperature = float(config.get("temperature", 0.3))
            self.top_p = float(config.get("top_p", 0.85))
            # Fix: top_k is an integer candidate count but was cast with
            # float(), sending e.g. 5.0 instead of 5 in the request body.
            # (An unused stray "self.n" read was dropped here as well.)
            self.top_k = int(config.get("top_k", 5))
            self.model = str(config.get("model", ""))

            # Optional relay/proxy endpoint; drop trailing whitespace and "/".
            if config.get("agency_api"):
                self.base_url = config.get("agency_api").rstrip().rstrip("/")

    def llm(self, messages: list, tools: list = None, stream=True):
        """Return the chat-completion coroutine against the Baichuan endpoint."""
        client = AsyncOpenAI(
            base_url=self.base_url,
            api_key=self.apis_key
        )

        # Include max_tokens only when explicitly configured (> 0).
        extra_args = {}
        if self.max_tokens:
            extra_args["max_tokens"] = self.max_tokens

        return client.chat.completions.create(
            stream=stream,
            model=self.model,
            messages=messages,
            temperature=self.temperature,
            top_p=self.top_p,
            tools=tools,
            extra_body={
                "top_k": self.top_k
            },
            **extra_args
        )
