# -*- coding: UTF-8 -*-
# 2023/8/8 21:55


import requests
from requests import Timeout
import json
from askingquestions_log import log_out

logger = log_out()


class send_requests:
    """
    Access-token retrieval and one request method per Qianfan model.

    NOTE: a new model's method name must match the name shown in the model
    drop-down list, otherwise the corresponding method cannot be resolved.

    All chat methods share the same contract:

    Args:
        messages: conversation history, a list of {"role": ..., "content": ...} dicts.
        access_token: OAuth token obtained via ``get_access_token``.
        **kwargs: extra request-body fields forwarded verbatim to the API.

    Returns:
        The ``"result"`` text from the API response (``None`` if the response
        carries no result), or a timeout hint string on request timeout.
    """

    # Common prefix of every Qianfan chat endpoint; each public method only
    # supplies the per-model suffix.
    _CHAT_BASE = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat"

    # Placeholder error image (base64 PNG) returned when the text2image
    # response cannot be parsed.
    _FALLBACK_IMAGE = b"iVBORw0KGgoAAAANSUhEUgAAAJkAAAA1CAIAAADd4N54AAAACXBIWXMAAA7EAAAOxAGVKw4bAAAAEXRFWHRTb2Z0d2FyZQBTbmlwYXN0ZV0Xzt0AAAcnSURBVHic7ZxfTNtGHMcd2yH/QNpL+CMeCGqlTWnoE6SIF0or1q1ap3Vi6iYktFYrEu2oqKi6iSIqpnbTuqKyshWJbrBFilQkpEoro2NRSzNViJZqmyCkL0PAA0raPILzz7GzB7eXwzj2ObVD5vnz5HN+d/ezv75/vzswpNNpTEcT4DvtgI5i6FpqB11L7aBrqR3InXbg/wIbCjMLiyCJV1YSLqcixgBdyzzBLCxSvf0gaTrWYskujyxjgN7HagddS+2ga5kn8MpKlYwzuXLIo5MDKJOX3IwBupbaQddSO+haagddS+0gI1aQmp1j/llmn0e4JF5qJ3bvIhvqX90JetoHro2HmtH9SW9scNfE3hq8ohylImZ5JR2NgjtkzR7EvCq5pCDSWrKhcMJ7i77vZyOR7b/idrvxQKOp9UNxvzeOtzOBIEi+Nv+Qu4hdHUxOTqWpzMu1QXLGrg4mxicyP126yP2UmLidGPXA/piOtVjOdWWrPTFxm57xpx4/EfhpfALDMMJRVXTksKmtVeQRlHUJL7VL1iXXWEJLetoX+/aGoIocbCSSGJ+g7/utvZ/JbaPU+Qv0jF9WFgzDEh5vbGgY0Tg1Oxe7OQp/RoIwq2uxoeHknSnL2c4cehpZLnHgZaWKG4uNlwmPl+rtFxESwEYiVE9fanYO1TsMi37xZS5CTtxGf2sJj5fq6ZMUEsCsrlE9fQmPVz2XVCVru9z+reF2e9E7bxO7qrkks7ySnLwLlE5T0eilr4t/HEYZJJhAMHlnCiQJRxVWbGNXVuHOVtirUQ/sj6HMnn4WEfzaBNsK6a4lqh1kzR4umVpcSj2aZ1bXgEGaisaGhvGyUvRhG90ltRHWkgkE46M/w3fMx9vMp9rhO0YMM59qh/tJNhKJXRuyXbksWWv85bdvOtYCj7X0tE8kfJW894B7R6S71tLRDoIjTCDIrq/DlqnZOZ7/hMtp7e7ixVM4wehpX/yHn2BFo199U4I2c0F3KQ8I97HRgUG4iVg6O3hCAmxXLsMviJ7xo/RpnPyWzg7LuS74lRkPNYuEr7hcxqbG4u8HYTPC5eQ1o9jNUdh/Y1NjydhItpKNh5pt1wcIRxW4k6ai8Zujkk8hy6U8IKAlPe2D9ZCc4Fm7t8zW6D8eolSMOG/kQTiqJNs9z3+ULHhFufXiBYPNmink/gM2FFbKJWCJYpaDMYeAlolffgXXBpvVfPKERK0uJ1wxPS8w9eeBUqwg5k8+lrSB/UfMgnEt6cB+kExTUdp3TymXXlBsQ7WUa4xhmKCW8DqMdNehDBvkvjpwjdLHIhbLA7fbUTou2H/CUYXe15nefw9O0o/mlXIpP/C1hEMwGIaR7tocCpXsnXIrlqyvk7Th+79POguAcDlxe2ZVzq6siRiju5Q3+PPY1OISnGSCT2NXpR8ptbSlLTILi+LNjnzjdWQPM6AMITz/wfIDEUOZHXu5nEBZV+QwqqmHRNwHXgUqSI57rXJiJRyGkhJZ9kS1Ax4j2FBY/KPMwSX10Pg+idyAnMFqhZPwwUZlUePIiMa1LFjUODIi0cfaLl1ErzJT996aHHKpARMI5tafcxTOHBUF6T2v/9bz8GDX12VpyZvEKQtR7VDJmIOvJW+rjN3YlFvizoJvnVgyyytGOdnTzzJzV8XnqNa+HpWMOfjjJbF7F5xkgk/llriz8FY7KEEoABMIwusQQuZ6Zsfha0k21OcWliwQeAFFJhBE379M/vY7nDTKiTMUAgLzWNKdeYY0FU14b+XRHwUwNjXCyejAIEouNhROTmYW0wUVnENEQEvz1u2LxPgELzCWDXraF78xooxfr0DR0XfhroUJBKnzF8SzsKEwdaYb3iYznWhTw7eExxu/MYLYVcgyxgS1JFzOoiOH4TtUb7+4SKnZuc3TXVRvP3zEbafAK8rNn3bAd+gZ/+bprmyDBRMIUme64b1o0l1rajmqrFdsKLzxQWtsaDg+5tk43i5+EkWWMUB4TWI+eYJZXIIfLz7mSU7eNR5o5EU4tx+zKARMLUeZ4FM4AJl6/GTjozbSXUe6nCDwxj57Tj+a553PIxxV1t7PFXcpdm0IfkuxoWFj88FsAUJZxgBhLfGKcsvZTqqnD+52uCN38InCQsba15PepODjYWkqSs/4xQ+MEY4q2/UBNc61bo/Ui+xAyDIGZI3hkQ31xd8Nyg01Fb31Jrq92tiuXLZ0dsBjpzhFRw6rJCSGYfBu2os72aOssowBYnEfwuUsGRtJeLzJO1MivajBZiXddUUH9xfgxM/U1mpsPihyVpvD2NSotv+Ws53syip4jZbODpF2IssYYED8v1tMIJj68y/wBwgceKld1vHDnSU1O5f6e4E3OyNr9uTNfzYUpn332OcRY0O95AaOLGMOVC11Ch99z0s76FpqB11L7aBrqR10LbWDrqV20LXUDrqW2kHXUjv8C6EUSTje8zt/AAAAAElFTkSuQmCC"

    @staticmethod
    def _chat(endpoint: str, model_name: str, messages: list, access_token: str, **kwargs):
        """
        Shared POST helper for every chat model.

        Fixes the original copy-pasted pattern in which the Timeout branch
        assigned a plain dict to ``response`` and then called
        ``response.json()`` — an AttributeError at runtime.

        Args:
            endpoint: per-model URL suffix under ``_CHAT_BASE``.
            model_name: name used in log messages only.
            messages / access_token / kwargs: see class docstring.
        """
        logger.info(f"本次请求\n\tmodel: {model_name}\n\taccess_token：{access_token}\n\tmessages: {messages}\n\tkwargs: {kwargs}")
        url = f"{send_requests._CHAT_BASE}/{endpoint}?access_token={access_token}"
        headers = {'Content-Type': 'application/json'}
        payload = json.dumps({"messages": messages, **kwargs})

        try:
            # timeout=(60, None): 60 s connect timeout, unbounded read timeout
            # (model generation can be slow).
            response = requests.request("POST", url, headers=headers, data=payload, timeout=(60, None))
            logger.info(f"提问请求返回: {response.json()}")
        except Timeout:
            logger.warning(f"{model_name}请求超时！")
            return "请求超时,请重试"

        return response.json().get("result")

    @staticmethod
    def get_access_token(API_Key, Secret_Key):
        """
        Fetch an OAuth access_token from Baidu with the app credentials.

        Returns:
            The token string, ``None`` when the credentials are rejected
            (no "access_token" key in the reply), or a timeout hint string
            when the request times out.
        """
        logger.info(f"本次更新token请求\n\tAPI_Key:{API_Key}\n\tSecret_Key:{Secret_Key}")
        url = f"https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id={API_Key}&client_secret={Secret_Key}"
        # The token endpoint ignores the body; an empty JSON string is sent
        # only to satisfy the Content-Type header.
        payload = json.dumps("")
        headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}

        try:
            response = requests.request("POST", url, headers=headers, data=payload, timeout=(60, None))
            access_token = response.json().get("access_token")
            logger.info(f"请求access_token返回: {response.json()}")
        except Timeout:
            access_token = "请求超时,请重试"
            logger.warning(f"请求token超时！")

        return access_token

    @staticmethod
    def ERNIE_Bot_turbo(messages: list, access_token: str, **kwargs):
        """
        ERNIE-Bot-turbo: Baidu's self-developed LLM trained on massive Chinese
        data, with strong dialogue/creation ability and faster responses.
        """
        return send_requests._chat("eb-instant", "ERNIE_Bot_turbo", messages, access_token, **kwargs)

    @staticmethod
    def ERNIE_Bot(messages: list, access_token: str, **kwargs):
        """
        ERNIE-Bot: Baidu's self-developed LLM trained on massive Chinese data,
        with strong dialogue Q&A and content-creation capability.
        """
        return send_requests._chat("completions", "ERNIE_Bot", messages, access_token, **kwargs)

    @staticmethod
    def ERNIE_Bot_4(messages: list, access_token: str, **kwargs):
        """
        ERNIE-Bot 4.0: flagship version of Baidu's self-developed LLM.
        """
        return send_requests._chat("completions_pro", "ERNIE_Bot_4", messages, access_token, **kwargs)

    @staticmethod
    def BLOOMZ_7B(messages: list, access_token: str, **kwargs):
        """
        BLOOMZ-7B: well-known open-source LLM by BigScience; outputs text in
        46 natural languages and 13 programming languages.

        Bug fix vs. original: the URL contained ``access_token==`` (doubled
        "="), producing a malformed query string.
        """
        return send_requests._chat("bloomz_7b1", "BLOOMZ_7B", messages, access_token, **kwargs)

    @staticmethod
    def Llama_2_7b_chat(messages: list, access_token: str, **kwargs):
        """
        Llama-2-7b-chat: Meta AI's open-source high-performance chat model.
        """
        return send_requests._chat("llama_2_7b", "Llama_2_7b_chat", messages, access_token, **kwargs)

    @staticmethod
    def Llama_2_13b_chat(messages: list, access_token: str, **kwargs):
        """
        Llama-2-13b-chat: Meta AI's open-source chat model balancing
        performance and quality.
        """
        return send_requests._chat("llama_2_13b", "Llama_2_13b_chat", messages, access_token, **kwargs)

    @staticmethod
    def Llama_2_70b_chat(messages: list, access_token: str, **kwargs):
        """
        Llama-2-70b-chat: Meta AI's open-source high-accuracy chat model.
        """
        return send_requests._chat("llama_2_70b", "Llama_2_70b_chat", messages, access_token, **kwargs)

    @staticmethod
    def Qianfan_Chinese_Llama_2_7B(messages: list, access_token: str, **kwargs):
        """
        Qianfan-Chinese-Llama-2-7B: the Qianfan team's Chinese-enhanced
        version of Llama-2-7b; strong results on CMMLU, C-EVAL, etc.
        """
        return send_requests._chat("qianfan_chinese_llama_2_7b", "Qianfan_Chinese_Llama_2_7B", messages, access_token, **kwargs)

    @staticmethod
    def Qianfan_BLOOMZ_7B_compressed(messages: list, access_token: str, **kwargs):
        """
        Qianfan-BLOOMZ-7B-compressed: quantized/sparsified BLOOMZ-7B with
        30%+ lower VRAM usage.
        """
        return send_requests._chat("qianfan_bloomz_7b_compressed", "Qianfan_BLOOMZ_7B_compressed", messages, access_token, **kwargs)

    @staticmethod
    def ChatGLM2_6B_32K(messages: list, access_token: str, **kwargs):
        """
        ChatGLM2-6B-32K: ChatGLM2-6B strengthened for long-text understanding,
        handling contexts up to 32K tokens.

        Bug fix vs. original: the request log named the wrong model
        (Qianfan_Chinese_Llama_2_7B).
        """
        return send_requests._chat("chatglm2_6b_32k", "ChatGLM2_6B_32K", messages, access_token, **kwargs)

    @staticmethod
    def AquilaChat_7B(messages: list, access_token: str, **kwargs):
        """
        AquilaChat-7B: open-source chat model — presumably the BAAI Aquila
        series (TODO confirm against Qianfan docs).

        Bug fix vs. original: the method posted to the
        ``qianfan_chinese_llama_2_7b`` endpoint (and logged that model name),
        so it never actually called AquilaChat; it now targets
        ``aquilachat_7b``.
        """
        return send_requests._chat("aquilachat_7b", "AquilaChat_7B", messages, access_token, **kwargs)

    @staticmethod
    def Stable_Diffusion_XL(body: dict, access_token: str, **kwargs):
        """
        Request a text-to-image result from Stable-Diffusion-XL.

        Args:
            body: full request body (prompt, size, ...) sent as-is.
            access_token: OAuth token.
            **kwargs: accepted for signature consistency with the chat
                methods; not forwarded to the API.

        Returns:
            A list of base64-encoded images; on a malformed response a single
            placeholder error image is returned instead. A request timeout
            propagates to the caller (as in the original).
        """
        logger.info(f"本次文生图请求\n\taccess_token：{access_token}\n\tbody: {body}\n\tkwargs: {kwargs}")

        url = f"https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/text2image/sd_xl?access_token={access_token}"
        payload = json.dumps(body)
        headers = {"Content-Type": "application/json"}
        response = requests.request("POST", url, headers=headers, data=payload, timeout=(60, None))

        try:
            # Each entry of "data" carries one base64-encoded image.
            # (Original shadowed the result list with the loop variable.)
            return [item["b64_image"] for item in response.json()["data"]]
        except Exception:
            # Narrowed from a bare except: no longer swallows
            # KeyboardInterrupt / SystemExit.
            logger.info(f"返回数据解析错误:{response.json()}")
            return [send_requests._FALLBACK_IMAGE]
