import base64
import hmac
import json
import secrets
import time

import openai
from openai import OpenAI

from inference.models import api_model


class gpt_4_ke(api_model):
    """OpenAI-compatible chat client for a GPT-4 model behind KE's
    HMAC-signed gateway.

    The gateway authenticates each request with an ``LJ-HMAC-SHA256``
    authorization header (built by :meth:`gen_signature`) instead of a
    plain OpenAI API key; the signed header is passed through the SDK's
    ``api_key`` slot.
    """

    def __init__(self, workers=10, **kwargs):
        """
        Args:
            workers: worker count forwarded to the ``api_model`` base class.
            **kwargs: ``ip`` — gateway host name (default ``""``);
                ``model_id`` — chat model identifier (default ``"gpt-4"``).
        """
        super().__init__(workers)
        self.workers = workers
        self.model_name = kwargs.get("model_id", "gpt-4")
        self.temperature = 0.7   # default sampling temperature
        self.max_length = 1000   # not referenced below; kept for interface parity
        self.host = kwargs.get("ip", "")
        # SECURITY(review): credentials are hard-coded in source. They have
        # effectively been published by being committed — they should be
        # rotated and loaded from the environment or a secrets store.
        self.accessKeyId = "IYHR812QCV9NQG1ZXZ0H"
        self.accessKeySecret = "1pkqLRSfzLpjH/TfPyng9v5VpC1vlideA89q8jJs"

    def gen_signature(self, path):
        """Build the ``LJ-HMAC-SHA256`` authorization header for a POST to *path*.

        The canonical string-to-sign is the '&'-joined, lexicographically
        sorted list of method/path/host/accessKeyId/nonce/timestamp pairs;
        its HMAC-SHA256 digest (keyed with the access-key secret) is
        base64-encoded into the ``signature`` fragment of the header.

        Args:
            path: request path being signed, e.g. ``"/v1/chat/completions"``.

        Returns:
            str: the full authorization header value.
        """
        algorithm = "LJ-HMAC-SHA256"
        # Fresh random nonce per request. Previously a hard-coded constant,
        # which defeats the replay protection a nonce exists to provide.
        # token_hex(16) keeps the original 32-hex-character format — confirm
        # the gateway imposes no stricter nonce rule.
        nonce = secrets.token_hex(16)
        timestamp = int(time.time())

        string_to_sign = "&".join(sorted([
            "method=POST",
            "path=" + path,
            "host=" + self.host,
            "accessKeyId=" + self.accessKeyId,
            "nonce=" + nonce,
            "timestamp=" + str(timestamp),
        ]))

        digest = hmac.new(
            self.accessKeySecret.encode("utf-8"),
            string_to_sign.encode("utf-8"),
            digestmod="sha256",
        ).digest()
        signature = base64.b64encode(digest).decode("utf-8")

        fragments = [
            "accessKeyId=" + self.accessKeyId,
            "nonce=" + nonce,
            "timestamp=" + str(timestamp),
            "signature=" + signature,
        ]
        return algorithm + " " + "; ".join(fragments)

    def get_api_result(self, sample, require_json=True):
        """Answer a single sample dict.

        Args:
            sample: dict with key ``"question"``; optional ``"temperature"``
                overrides the instance default.
            require_json: forwarded to :meth:`raw_request`.

        Returns:
            str: the model's reply text, or ``"error"`` on failure.
        """
        question = sample["question"]
        temperature = sample.get("temperature", self.temperature)
        return self.raw_request(question, require_json, temperature)

    def raw_request(self, question, require_json, temperature):
        """Send *question* as a single-turn chat completion.

        JSON response mode is requested only when *require_json* is set, the
        prompt mentions "json", and the model is one known to support
        ``response_format``.

        Args:
            question: user prompt text.
            require_json: whether JSON-object output may be requested.
            temperature: sampling temperature for this request.

        Returns:
            str: reply content, or the string ``"error"`` if the request
            failed (failures are printed, not raised).
        """
        path = "/v1/chat/completions"
        sign = self.gen_signature(path)
        # The signed header rides in the SDK's api_key slot; the gateway
        # validates it server-side.
        client = OpenAI(api_key=sign, base_url=f"http://{self.host}/v1")

        json_capable = self.model_name in (
            "gpt-3.5-turbo-1106", "gpt-4-1106-preview", "gpt-4-0125-preview")
        messages = [{"role": "user", "content": f"{question}"}]

        res = "error"
        try:
            if require_json and json_capable and "json" in question.lower():
                rsp = client.chat.completions.create(
                    model=self.model_name,
                    messages=messages,
                    stream=False,
                    temperature=temperature,
                    response_format={"type": "json_object"},
                    timeout=200)
            else:
                # BUG FIX: this branch previously passed self.temperature,
                # silently ignoring the caller-supplied temperature.
                rsp = client.chat.completions.create(
                    model=self.model_name,
                    messages=messages,
                    stream=False,
                    temperature=temperature,
                    timeout=200)
            res = rsp.choices[0].message.content
        except openai.APIConnectionError as e:
            print("The server could not be reached")
            print(e.__cause__)  # an underlying Exception, likely raised within httpx.
        except openai.RateLimitError as e:
            print("A 429 status code was received; we should back off a bit.")
        except openai.APIStatusError as e:
            print("Another non-200-range status code was received")
            print(e.status_code)
            print(e.response)

        return res


def requst_gpt4_ke(question, temperature=0, model_id="gpt-4", require_json=True):
    """Convenience wrapper: ask the KE-hosted GPT-4 gateway one question.

    Builds a throwaway client per call. The function name keeps its
    historical (misspelled) form because it is the public entry point.
    """
    client = gpt_4_ke(model_id=model_id, ip="chatgpt.ke.com")
    return client.raw_request(question, require_json, temperature)


def main():
    """Smoke-test the KE GPT-4 wrapper with a single Chinese prompt."""
    prompt = "你是谁？"
    answer = requst_gpt4_ke(
        prompt, temperature=0, model_id="gpt-4-1106-preview", require_json=True)
    print(answer)


if __name__ == "__main__":
    main()
