# -*- coding:utf-8 -*-

# @Time    : 2023/5/10 14:30
# @Author  : zengwenjia
# @Email   : zengwenjia@lingxi.ai
# @Software: LLM_internal

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import openai
import requests
import json
import config
import logging
logging.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s', level=logging.INFO)

from openai.api_requestor import parse_stream

# Address of the host running the fastchat controller (port 21001) and its
# model workers; worker addresses returned as "localhost" are rewritten to it.
llm_host = "180.184.75.50"
# Load project configuration at import time (module-level side effect).
config.load_config()

class LLMAgent(object):
    """Thin client that sends one prompt to several chat backends.

    Backends: a local vicuna worker behind a fastchat controller
    (``chat_llm`` / ``chat_llm_stream``), a GPT-4 / GPT-3.5 relay endpoint
    (``chat_with_gpt4*`` / ``chat_with_gpt3_5*``), and Azure OpenAI
    (``chat_with_azure``).

    SECURITY NOTE(review): the API keys below are hard-coded secrets checked
    into source control — they should be moved to config/environment variables.
    """

    def __init__(self, prompt, origin_prompt=None):
        """
        :param prompt: user question sent to every backend.
        :param origin_prompt: optional raw prompt; when set it is stored as the
            "input" field of saved training samples instead of the chat prompt.
        """
        self.prompt = prompt
        self.origin_prompt = origin_prompt
        # OpenAI-style message list shared by all GPT endpoints.
        self.gpt_prompt = [
            {"role": "system", "content": "你是一个专业的助理，你可以做任何事情"},
            {"role": "user", "content": prompt},
        ]
        # Vicuna v1.1 completion template. Bug fix: the original string was
        # missing the space before "ASSISTANT:" that the template requires.
        self.model_prompt = (
            "A chat between a curious user and an artificial intelligence "
            "assistant. The assistant gives helpful, detailed, and polite "
            "answers to the user's questions. USER: "
            + self.prompt
            + " ASSISTANT:"
        )

    def _get_worker_addr(self):
        """Ask the fastchat controller which worker serves the model.

        Rewrites "localhost" in the returned address so it is reachable from
        this machine.  :return: base URL of the worker.
        """
        ret = requests.post(
            "http://" + llm_host + ":21001" + "/get_worker_address",
            json={"model": "checkpoints"},
        )
        worker_addr = ret.json()["address"]
        return worker_addr.replace("localhost", llm_host)

    def _save_train_data(self, result):
        """Append one instruction/input/output sample to train_data.json.

        One JSON object per line, UTF-8 encoded (bug fix: the file was
        previously opened without an explicit encoding even though it is
        written with ensure_ascii=False).
        """
        train_data = {
            "instruction": self.gpt_prompt[0]["content"],
            # Prefer the caller-supplied raw prompt when one was given.
            "input": self.origin_prompt if self.origin_prompt else self.gpt_prompt[1]["content"],
            "output": result,
        }
        with open("train_data.json", "a", encoding="utf-8") as f:
            json.dump(train_data, f, ensure_ascii=False)
            f.write("\n")

    def chat_llm(self):
        """Blocking completion against the local vicuna worker.

        :return: the generated text, stripped of surrounding whitespace.
        """
        payload = {
            "model": 'vicuna-13b',
            "prompt": self.model_prompt,
            "temperature": 0.7,
            "max_tokens": 2048
        }
        logging.info("the model prompt is: {}".format(self.model_prompt))

        worker_addr = self._get_worker_addr()
        response = requests.post(
            worker_addr + "/worker_generate_completion",
            json=payload,
            timeout=100,
        )

        # Bug fix: the original unconditionally did json.loads(response.json()),
        # which raises TypeError when the worker returns a plain JSON object.
        # Accept both a dict body and a double-encoded JSON string.
        data = response.json()
        if isinstance(data, str):
            data = json.loads(data)
        result = data["text"].strip()
        logging.info("the llm response is: {}".format(result))
        return result

    def chat_with_gpt4(self, save_data=True, retry_count=0, temperature=0.9, agent_name='agentXX'):
        """Call GPT-4 through the relay endpoint.

        :param save_data: when True, append the sample to train_data.json.
        :param retry_count: kept for interface compatibility (not used here).
        :param temperature: sampling temperature forwarded to the API.
        :param agent_name: label used only for logging.
        :return: the assistant message content.
        """
        logging.info("{}, the gpt4 prompt is: {}".format(agent_name, self.gpt_prompt))
        headers = {
            "Content-Type": "application/json",
            # SECURITY: hard-coded key (offline tuning key) — move to config.
            "Authorization": "Bearer sk-IL8Kyk1hwKPSt0M5sKUrbzlvxFUhrkNho4kHdkTQtXUx1OPA"
        }
        payload = {
            "model": "gpt-4",
            "messages": self.gpt_prompt,
            "temperature": temperature
        }
        response = requests.post(
            "https://api.xiaojuan.ml/v1/chat/completions",
            json=payload,
            headers=headers,
        )
        # Parse the body once instead of calling response.json() twice.
        body = response.json()
        print('GPT4_GPT4_GPT4_RESPONSE: %s' % body)
        result = body["choices"][0]['message']['content']

        if save_data:
            self._save_train_data(result)
        return result

    def chat_with_azure(self, retry_count=0, temperature=0.9):
        """Call Azure OpenAI (gpt-35 deployment) with one retry.

        :return: the assistant message content, or "" after the retry fails.
        """
        logging.info("gpt3_5_azure，the gpt prompt is: {}".format(self.gpt_prompt))
        try:
            response = openai.ChatCompletion.create(
                api_type="azure",
                api_version="2023-03-15-preview",
                api_base="https://lingxi-openai.openai.azure.com",
                # SECURITY: hard-coded Azure key — move to config.
                api_key="45a5ee249f364e208dd950f87ab5aba7",
                engine="gpt-35",
                messages=self.gpt_prompt,
                temperature=temperature,
                max_tokens=2048,
                request_timeout=10,
            )
            result = response["choices"][0]['message']['content']
        except Exception as ee:
            logging.error("gpt3.5 error: {}".format(ee))
            if retry_count < 1:
                # Bug fix: the retry previously dropped the caller's temperature.
                return self.chat_with_azure(retry_count + 1, temperature)
            result = ""

        return result

    def chat_with_gpt4_stream(self, retry_count=0, temperature=0.9):
        """Stream GPT-4 deltas from the relay endpoint.

        :yields: parsed SSE chunk dicts as produced by the API.
        """
        headers = {
            "Content-Type": "application/json",
            # SECURITY: hard-coded key — move to config.
            "Authorization": "Bearer sk-HDiGyMBSLweZDwmoVqL9j1eo5KVGXFkqC9VR8ukG6APmflow"
        }
        payload = {
            "model": "gpt-4",
            "messages": self.gpt_prompt,
            "stream": True,
            "temperature": temperature
        }
        response = requests.post(
            "https://api.xiaojuan.ml/v1/chat/completions",
            json=payload,
            stream=True,
            headers=headers
        )
        for chunk in parse_stream(response.iter_lines()):
            if chunk:
                yield json.loads(chunk)

    def chat_with_gpt3_5(self, save_data=False, retry_count=0, temperature=0.9, agent_name='agentXX'):
        """Call GPT-3.5 through the relay endpoint with one retry.

        :param save_data: when True, append the sample to train_data.json.
        :param retry_count: internal retry counter; callers leave it at 0.
        :return: the assistant message content, or "" after the retry fails.
        """
        logging.info("{}, the gpt prompt is: {}".format(agent_name, self.gpt_prompt))
        headers = {
            "Content-Type": "application/json",
            # SECURITY: hard-coded key — move to config.
            "Authorization": "Bearer sk-IL8Kyk1hwKPSt0M5sKUrbzlvxFUhrkNho4kHdkTQtXUx1OPA"
        }
        payload = {
            "model": "gpt-3.5-turbo",
            "messages": self.gpt_prompt,
            "temperature": temperature
        }
        # Bug fix: `result` was undefined (NameError) when the request failed
        # after the retry budget was exhausted.
        result = ""
        try:
            response = requests.post(
                "https://api.xiaojuan.ml/v1/chat/completions",
                json=payload,
                headers=headers,
            )
            result = response.json()["choices"][0]['message']['content']
        except Exception as e:
            logging.error("{}, gpt3.5 error: {}".format(agent_name, e))
            if retry_count < 1:
                # Bug fix: the retry previously dropped temperature/agent_name.
                return self.chat_with_gpt3_5(save_data, retry_count + 1, temperature, agent_name)

        if save_data:
            # Consistency fix: the original stored the *system* prompt
            # (gpt_prompt[0]) as "input" here while chat_with_gpt4 stored the
            # user message; the shared helper uses the user message.
            self._save_train_data(result)

        return result

    def chat_with_gpt3_5_stream(self):
        """Stream GPT-3.5 deltas from the relay endpoint.

        :yields: parsed SSE chunk dicts as produced by the API.
        """
        headers = {
            "Content-Type": "application/json",
            # SECURITY: hard-coded key — move to config.
            "Authorization": "Bearer sk-HDiGyMBSLweZDwmoVqL9j1eo5KVGXFkqC9VR8ukG6APmflow"
        }
        payload = {
            "model": "gpt-3.5-turbo",
            "messages": self.gpt_prompt,
            "stream": True,
        }
        response = requests.post(
            "https://api.xiaojuan.ml/v1/chat/completions",
            json=payload,
            stream=True,
            headers=headers
        )
        for chunk in parse_stream(response.iter_lines()):
            if chunk:
                yield json.loads(chunk)

    def chat_llm_stream(self):
        """Stream tokens from the local vicuna worker.

        :yields: decoded JSON chunk dicts from the NUL-delimited stream.
        """
        logging.info("the llm prompt is :{}".format(self.prompt))
        payload = {
            "model": 'vicuna-13b',
            "prompt": self.model_prompt,
            "temperature": 0.7,
            "max_new_tokens": 1024,
            "stop": None,
            "stop_token_ids": None,
            "echo": False,
        }

        worker_addr = self._get_worker_addr()
        response = requests.post(
            worker_addr + "/worker_generate_stream",
            json=payload,
            stream=True,
            timeout=20,
        )
        # The worker separates JSON chunks with NUL bytes.
        for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"):
            if chunk:
                yield json.loads(chunk.decode())
if __name__ == "__main__":
    chat = LLMAgent("给我个长篇的文字和公主的通话故事，用于胎教")
    result = chat.chat_with_gpt3_5(save_data=False)
    print(result)

