# -*- coding:utf-8 -*-
import time

# @Time    : 2023/5/10 14:30
# @Author  : zengwenjia
# @Email   : zengwenjia@lingxi.ai
# @Software: LLM_internal

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

import requests
import json

from agent.llm_agent_parent import LLMAgentParent
import config
from common.log import logger
import openai
import aiohttp
import asyncio
import itertools
import random
import threading
import settings

# Pick the FastChat worker host based on the deployment environment; in both
# environments the default backend is the self-hosted model ("self").
# NOTE(review): ONLINE -> localhost looks inverted relative to the names --
# confirm that production really runs the worker on the same machine.
if settings.ONLINE:
    llm_host = "localhost"
    default_llm = "self"
else:
    llm_host = "socket.chatlingxi.com"
    default_llm = "self"
# Import-time side effect: load file-based configuration before anything else
# in this module uses it.
config.load_config()
import copy

# Process-wide lock; currently only referenced by the commented-out
# save_train_data helper inside LLMAgent.
lock = threading.Lock()
# Grab (or create) an event loop at import time; it is later used to push
# save_train_data onto the default thread-pool executor from async code.
# NOTE(review): calling asyncio.get_event_loop() with no running loop is
# deprecated since Python 3.10 -- confirm the target runtime version.
try:
    loop = asyncio.get_event_loop()
except RuntimeError:
    asyncio.set_event_loop(asyncio.new_event_loop())
    loop = asyncio.get_event_loop()



# SECURITY(review): live API keys are hard-coded in source. They should be
# moved to environment variables / secret storage and the committed keys
# rotated; anyone with read access to this repository can use them.
# Primary pool of (base_url, api_key) pairs for the OpenAI-compatible proxy.
keys = [
    ("https://api.chatanywhere.com.cn/", "sk-PPUZpPmkuKLjTSsfkn7xqaO6Gu0S1buGP4iQgh9lW4OElQ4K"),
    ("https://api.chatanywhere.com.cn/", "sk-i6KyMG38rXPdP7FkH0zneEDKRbcxdNNcvGeRZPvv4Yd6sSLv"),
    ("https://api.chatanywhere.com.cn/", "sk-7xdFhyG5Bjc9hiRO0Y2dfAdjfNYJvxFFv0Yh2inYnc01RJAw"),
    ("https://api.chatanywhere.com.cn/", "sk-3EqaSt314QvtJPvgRmJxJyOUwTbfDr66KLiCmKiXo8xTaR0n"),
    ("https://api.chatanywhere.com.cn/", "sk-Khl2TOmUNVYxvWSerRpEZ5vZHZLX4K2AFt8lnoeJHnIrCUrf"),
    ("https://api.chatanywhere.com.cn/", "sk-gPIlXLvUMpwFj9A2Xlz7yP3OGi4tIZk13hZpCEEjqcOdgLzv"),
    ("https://api.chatanywhere.com.cn/", "sk-qGDChv1yGjGBEpho6taKQiohbwgQpUJGPtWkKsRlJA11JbL1"),
    ("https://api.chatanywhere.com.cn/", "sk-Nst84xltzX2JBgLAmvpLJshAtjsknG86V9Qu4CwIcPfvfIBa"),
    ("https://api.chatanywhere.com.cn/", "sk-kkv6dQGHEwamPxGzWPrjb6ZeywqCpEIpclznkSGjfpMhmF1G"),
    ("https://api.chatanywhere.com.cn/", "sk-Wb9CV3QoeUS2p6aH06QMgVzILuLDc3eordqI6gxfSXAaXdTm"),
]

# Retired keys kept for reference; not used by any cycle below.
expire_keys = [
    ("https://api.chatanywhere.cn", "sk-ELfDqXCb4t9w9A2wNv7kAxyrPWPAasch3Zq6MLCusvb7hCdN"),
    ("https://api.chatanywhere.cn", "sk-qFvCZmc454XFY6zREYlAI91XFOCxjs4BnOuriIw1yLps8TkV"),
    # ("https://key.ikungpt.com/", "sk-DTMz1NGXgIMBXUmf06EaC206A5Eb4c939c607e73Ac45EeF0"),
    ("https://api.chatanywhere.cn", "sk-RuYKVoD9tbTjrwnCo2z1zBjrPEeKp8IIO7Zp3XvD48IgjPBR"),

    # ("https://api.xiaojuan.tech", "sk-NpvHbplgoXOg5E7ySJ0Xhx4osqEK36Bi7nJDjgjp4CMxN4dV"),
    # ("https://api.xiaojuan.tech", "sk-gS3VhVL2C8y87Rw57QrJMnhBZv6Nj6eczu9BbPohF7dr4Rit"),
    # ("https://api.chatanywhere.cn", "sk-teqkUzWiRKCGgEzTwNJdpRq8OAg2zqYmwc55hYgsq7hHW0Lh"),

]

# Fallback pool; backup_data_cycle is built from it but is not consumed in
# this file (presumably used elsewhere -- confirm before removing).
backup_keys = [("https://api.chatlingxi.com", "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")]
# Shuffle once at import so concurrent workers don't all start on the same key.
random.shuffle(keys)

# Round-robin iterators: each request takes the next key via next(data_cycle).
data_cycle = itertools.cycle(keys)
backup_data_cycle = itertools.cycle(backup_keys)



# Mutator for the module-level default backend selection.
def change_default_llm(llm_model):
    """Switch the module-wide default LLM backend (read by achat_auto_llm)."""
    global default_llm
    default_llm = llm_model


def get_access_token():
    """Fetch a Baidu AIP OAuth access token for the ERNIE chat API.

    Uses the client-credentials grant with the API key / secret key embedded
    in the URL. Returns the token string, or None when the response carries
    no "access_token" field.

    SECURITY(review): client_id / client_secret are hard-coded here -- they
    should come from configuration or the environment, and the committed
    credentials rotated.
    """
    url = (
        "https://aip.baidubce.com/oauth/2.0/token"
        "?grant_type=client_credentials"
        "&client_id=X8TlvTouC1rUTiMNxaaOlgoG"
        "&client_secret=7fA2Mzo1Lc5orBoK2oKglkmRu9RSAhe5"
    )
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json',
    }
    # The endpoint expects an empty JSON body; json.dumps("") yields '""'.
    payload = json.dumps("")
    # Bug fix: the original call had no timeout. This function runs at module
    # import time (see the ernie_token assignment below), so an unreachable
    # endpoint would hang the whole import forever.
    response = requests.post(url, headers=headers, data=payload, timeout=30)
    return response.json().get("access_token")


# Import-time network call: the ERNIE token is fetched exactly once when this
# module is first imported. NOTE(review): the token is never refreshed, so it
# will eventually expire in long-running processes -- confirm the refresh
# strategy (restart vs. periodic re-fetch).
ernie_token = get_access_token()


class LLMAgent(LLMAgentParent):
    """LLM client supporting multiple backends -- GPT-4/GPT-3.5 through a pool
    of rotating proxy keys, the openai SDK directly, Azure OpenAI, Baidu
    ERNIE, and self-hosted FastChat workers (llama-2 / Qwen) -- in both
    blocking and streaming flavors.

    NOTE(review): several methods call ``self.save_train_data``; its local
    definition is commented out below, so presumably it is inherited from
    ``LLMAgentParent`` -- confirm, otherwise those calls raise AttributeError.
    """

    def __init__(self, prompt, gpt_prompt_content=None):
        """Build both a chat-style and a completion-style prompt.

        :param prompt: raw user prompt; also used as the system message when
            ``gpt_prompt_content`` is not provided.
        :param gpt_prompt_content: optional override for the system-message
            content sent to OpenAI-style chat endpoints.
        """
        super().__init__(prompt, gpt_prompt_content)
        if gpt_prompt_content is None:
            self.gpt_prompt_content = prompt
        else:
            self.gpt_prompt_content = gpt_prompt_content
        # Message list for chat-completions APIs (single system message).
        self.gpt_prompt = [
            {"role": "system", "content": self.gpt_prompt_content},
        ]
        # Vicuna-style "USER: ... ASSISTANT:" plain prompt for the self-hosted
        # completion workers. NOTE(review): assumes ``self.prompt`` is set by
        # LLMAgentParent -- confirm.
        self.model_prompt = "你是一个专业的助手，你需要基于用户的问题，给出准确的答案。USER: " + self.prompt + " ASSISTANT:"

    async def async_chat_with_openai(self, retry_count=0):
        """Call GPT-4 through the globally configured openai SDK client.

        Returns the completion text, or "" on any failure (errors are logged,
        never raised to the caller in the current code path).
        """
        logger.info("gpt4_openai，the gpt prompt is: {}".format(self.gpt_prompt))
        try:
            response = await openai.ChatCompletion.acreate(
                model="gpt-4",  # chat model name
                messages=self.gpt_prompt,
                top_p=1,
                temperature=0.9,
                max_tokens=1024,
                request_timeout=100,
            )
            result = response["choices"][0]['message']['content']
        except Exception as ee:
            try:
                logger.error("gpt4 error: {}".format(ee))
                # if retry_count < 1:
                #     return await self.achat_with_azure(retry_count + 1)
                result = ""
            except Exception:
                # NOTE(review): logging above is unlikely to raise, so this
                # fallback path appears effectively unreachable -- confirm.
                logger.error("gpt4 stream retry error: {}".format(ee))
                raise TimeoutError

        return result

    async def chat_with_openai_stream(self, retry_count=0):
        """Async generator: stream GPT-4 chunks via the openai SDK.

        Yields each chunk parsed back into a dict; stops silently (after
        logging) on any error.
        """
        logger.info("gpt4_openai, the gpt prompt is: {}".format(self.gpt_prompt))
        try:
            response = await openai.ChatCompletion.acreate(
                model="gpt-4",  # chat model name
                messages=self.gpt_prompt,
                top_p=1,
                temperature=0.9,
                max_tokens=1024,
                stream=True,
                request_timeout=100,
            )

            async for chunk in response:
                if chunk:
                    # NOTE(review): relies on str(chunk) producing valid JSON
                    # for the SDK's chunk objects -- confirm with the pinned
                    # openai version.
                    data = json.loads(str(chunk))
                    yield data

        except Exception as ee:
            logger.error("gpt4 stream error: {}".format(ee))

    async def achat_with_proxy(self, retry_count=0):
        """Call gpt-3.5-turbo through the rotating proxy-key pool.

        Takes the next (base_url, key) from data_cycle; retries once with a
        fresh key on failure. Returns the completion text or "" on failure.
        """
        logger.info("the gpt prompt is: {}".format(self.gpt_prompt))
        result = ""
        key_tuple = next(data_cycle)
        key = key_tuple[1]
        try:
            headers = {
                "Content-Type": "application/json",
                "Authorization": "Bearer " + key,
            }
            payload = {
                "model": "gpt-3.5-turbo",
                "messages": self.gpt_prompt,
                "temperature": 0.9,
                # "stream": True,
            }
            async with aiohttp.ClientSession() as session:
                response = await session.post(key_tuple[0] + "/v1/chat/completions",
                                              headers=headers, json=payload,
                                              timeout=aiohttp.ClientTimeout(total=100))
                content = await response.content.read()
            result = json.loads(content.decode())["choices"][0]['message']['content']
        except Exception as ee:
            logger.error("gpt proxy error: {}".format(ee))
            if retry_count < 1:
                return await self.achat_with_proxy(retry_count + 1)

        return result

    # NOTE(review): save_train_data is still invoked by achat_with_proxy_gpt4
    # and achat_with_proxy_ernie even though this local copy is disabled --
    # presumably the parent class provides it; confirm.
    # def save_train_data(self, prompt, result, classname, file_name="llm", ):
    #     try:
    #         with lock:
    #             project_dir = os.path.dirname(os.path.dirname(__file__))
    #             # file name stamped with the date, yyyy-mm-dd
    #             now_date = datetime.datetime.now().strftime("%Y-%m-%d")
    #
    #             path = os.path.join(project_dir, "train_data/" + file_name + "_" + now_date + ".json")
    #             if not os.path.exists(path):
    #                 new_data = []
    #             else:
    #                 new_data = utils.jload(path)
    #             instruct_dict_new = {}
    #             id = str(uuid.uuid1())
    #             instruct_dict_new["id"] = id
    #             instruct_dict_new["conversations"] = []
    #             conversation_dict = {}
    #             conversation_dict["from"] = "human"
    #             conversation_dict["task"] = classname
    #             conversation_dict["value"] = prompt
    #             instruct_dict_new["conversations"].append(conversation_dict)
    #             conversation_dict = {}
    #             conversation_dict["from"] = "gpt"
    #             conversation_dict["task"] = classname
    #             conversation_dict["value"] = result
    #             instruct_dict_new["conversations"].append(conversation_dict)
    #             new_data.append(instruct_dict_new)
    #             utils.jdump(new_data, path)
    #
    #
    #     except Exception as ee:
    #         logger.error("save data error: {}".format(ee))

    async def achat_with_proxy_gpt4(self, retry_count=0, save_data=True, save_path="llm"):
        """Call GPT-4 through the rotating proxy-key pool.

        Retries once (switching the model to gpt-4-0613 and drawing a fresh
        key) on failure. When save_data is true, fires save_train_data on the
        module-level loop's thread-pool executor (fire-and-forget). Returns
        the completion text or "" on total failure.
        """
        # logger.info("the gpt prompt is: {}".format(self.gpt_prompt))
        logger.debug("the gpt prompt is: {}".format(self.gpt_prompt[0].get('content')))
        result = ""
        key_tuple = next(data_cycle)
        key = key_tuple[1]
        content = ""
        model_type = "gpt-4"
        if retry_count > 0:
            model_type = "gpt-4-0613"
        try:
            headers = {
                "Content-Type": "application/json",
                "Authorization": "Bearer " + key
            }
            payload = {
                "model": model_type,
                "messages": self.gpt_prompt,
                "temperature": 0.9,
                "max_tokens": 1024,
                # "request_timeout": 30
                # "stream": True,
            }
            async with aiohttp.ClientSession() as session:
                response = await session.post(key_tuple[0] + "/v1/chat/completions",
                                              headers=headers, json=payload,
                                              timeout=aiohttp.ClientTimeout(total=300))
                content = await response.content.read()
            try:
                result = json.loads(content.decode())["choices"][0]['message']['content']
            except Exception as ee:
                logger.error("loggpt proxy error: {}".format(ee))
                # TODO: refactor this inline second attempt into a recursive
                # retry later (original note: 后续改成递归)
                async with aiohttp.ClientSession() as session:
                    response = await session.post(key_tuple[0] + "/v1/chat/completions",
                                                  headers=headers, json=payload,
                                                  timeout=aiohttp.ClientTimeout(total=300))
                    content = await response.content.read()
                logger.info(f'gpt4 response is {content}')
                result = json.loads(content.decode())["choices"][0]['message']['content']

            if save_data:
                # Fire-and-forget: the future is deliberately not awaited.
                loop.run_in_executor(None, self.save_train_data, self.prompt, result,
                                     self.__class__.__name__, save_path, )


        except Exception as ee:
            logger.error("the key is: {}".format(key))
            if content:
                logger.error("receive message is:" + content.decode())
            logger.exception("gpt proxy error: {}".format(ee))
            if retry_count < 1:
                return await self.achat_with_proxy_gpt4(retry_count + 1)

        return result

    async def achat_with_proxy_ernie(self, save_data=True, save_path="ernie"):
        """Call Baidu ERNIE with the module-level access token.

        Returns the "result" field of the response, or "" on failure.
        NOTE(review): the save_train_data call here passes only (prompt,
        result, save_path), while the GPT-4 variant also passes the class
        name -- looks like a missing argument; confirm the expected
        signature.
        """
        logger.info("the ernie prompt is: {}".format(self.prompt))
        result = ""
        try:
            url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions?access_token=" + ernie_token
            headers = {
                'Content-Type': 'application/json'
            }

            payload = json.dumps({
                "messages": [
                    {
                        "role": "user",
                        "content": self.prompt
                    }
                ],
                "stream": False,
            })

            async with aiohttp.ClientSession() as session:
                async with session.post(url, headers=headers, data=payload) as response:
                    response_data = await response.text()

                    result = json.loads(response_data)["result"]

            if save_data:
                loop.run_in_executor(None, self.save_train_data, self.prompt, result, save_path)


        except Exception as ee:
            logger.exception("ernie proxy error: {}".format(ee))
        return result

    def parse_gpt_stream_response(self, response):
        """Parse one raw SSE line (bytes) from an OpenAI-style stream.

        Strips the leading "data: " prefix and decodes the JSON payload.
        Returns a dict for object payloads, a generator for list payloads,
        or None (implicitly) for blank lines and the "data: [DONE]"
        terminator. Raises for any other JSON payload type.
        """
        _line = None
        if response.strip() == b"data: [DONE]":
            response = None
        if response and response.strip():
            if response.startswith(b"data: "):
                response = response[len(b"data: "):]
            _line = response.decode("utf-8")

        if _line is not None:
            line_json = json.loads(_line)
            if isinstance(line_json, list):
                return (line_content for line_content in line_json)
            elif isinstance(line_json, dict):
                return line_json
            else:
                raise Exception("gpt stream error: {}".format(_line))

    async def achat_with_proxy_stream_gpt4(self):
        """Async generator: stream GPT-4 chunks through the proxy-key pool.

        On any error during the first stream it retries once, with a fresh
        key and model gpt-4-0314; a second failure is only logged, so the
        generator may end without yielding anything.
        """
        logger.info("the gpt prompt is: {}".format(self.gpt_prompt))
        key_tuple = next(data_cycle)
        key = key_tuple[1]
        headers = {
            "Content-Type": "application/json",
            "Authorization": "Bearer " + key
        }
        payload = {
            "model": "gpt-4",
            "messages": self.gpt_prompt,
            "stream": True,
            "temperature": 0.9,
            "max_tokens": 2048,
        }
        try:
            content = ""

            async with aiohttp.ClientSession() as session:
                async with session.post(key_tuple[0] + "/v1/chat/completions",
                                        headers=headers, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=100)) as response:
                    async for line in response.content:
                        result = self.parse_gpt_stream_response(line)
                        if result:
                            yield result



        except Exception as ee:
            # log the failing key
            logger.error("the key is: {}".format(key))
            logger.exception("gpt4 stream error: {}".format(ee))
            try:
                key_tuple = next(data_cycle)
                key = key_tuple[1]
                headers = {
                    "Content-Type": "application/json",
                    "Authorization": "Bearer " + key
                }
                payload = {
                    "model": "gpt-4-0314",
                    "messages": self.gpt_prompt,
                    "stream": True,
                    "temperature": 0.9,
                    "max_tokens": 2048,
                }
                async with aiohttp.ClientSession() as session:
                    async with session.post(key_tuple[0] + "/v1/chat/completions",
                                            headers=headers, json=payload,
                                            timeout=aiohttp.ClientTimeout(total=100)) as response:
                        async for line in response.content:
                            result = self.parse_gpt_stream_response(line)
                            if result:
                                yield result
            except Exception as ee:
                logger.error("the key is: {}".format(key))
                logger.exception("gpt4-0314 stream again error: {}".format(ee))

    def chat_with_azure(self, retry_count=0):
        """Blocking call to Azure OpenAI (gpt-35 deployment); one retry.

        Returns the completion text or "" on failure.
        SECURITY(review): the Azure api_key is hard-coded -- move to config.
        NOTE(review): the InvalidRequestError/Timeout handlers below appear
        unreachable, since the recursive retry itself swallows exceptions --
        confirm before relying on them.
        """
        logger.info("gpt3_5_azure，the gpt prompt is: {}".format(self.gpt_prompt))
        try:
            response = openai.ChatCompletion.create(
                api_type="azure",
                api_version="2023-03-15-preview",
                api_base="https://lingxi-openai.openai.azure.com",
                api_key="45a5ee249f364e208dd950f87ab5aba7",
                engine="gpt-35",
                messages=self.gpt_prompt,
                temperature=0.7,
                max_tokens=1024,
                request_timeout=15,
            )
            result = response["choices"][0]['message']['content']
        except Exception as ee:
            try:
                logger.error("gpt3.5 azure error: {}".format(ee))
                if retry_count < 1:
                    return self.chat_with_azure(retry_count + 1)
                result = ""
            except openai.error.InvalidRequestError:
                raise openai.error.InvalidRequestError("token limit", None)
            except openai.error.Timeout:
                raise TimeoutError
            except Exception:
                logger.error("gpt3.5 stream retry error: {}".format(ee))
                return ""

        return result

    async def achat_self_llm(self, model_name="planner_llama-2_13b_new"):
        """Drain the self-hosted stream and return the final chunk's text.

        NOTE(review): if the stream yields no chunks, ``x`` is unbound and
        the return raises NameError -- confirm whether the worker guarantees
        at least one chunk.
        """
        async for x in self.achat_self_llm_stream(model_name):
            # logger.info("the llm response is :{}".format(x["text"]))
            pass

        return x["text"]

    async def achat_self_llm_stream(self, model_name="planner_llama-2_13b_new"):
        """Async generator: stream completions from the FastChat worker.

        Chunks are NUL-delimited JSON objects. On failure, retries once with
        the fallback model. NOTE(review): the local ``default_llm`` below
        shadows the module-level global, so the fallback is always
        "planner_llama-2_13b_new" regardless of change_default_llm --
        confirm whether that is intended.
        """
        logger.info("the llm prompt is :{}".format(self.prompt))
        default_llm = "planner_llama-2_13b_new"
        payload = {
            "model": model_name,
            "prompt": self.model_prompt,
            "temperature": 0.9,
            "max_new_tokens": 1024,
            'repetition_penalty': 0.5,
            # 'top_p': 0.9,
            # 'top_k': 1,
            "stop": '</s>',
            "stop_token_ids": None,
            "echo": False,
        }
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + llm_host + ":21001" + "/worker_generate_stream",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=100)) as response:

                    # An empty delimiter is not supported by aiohttp, so we'll read chunk by chunk
                    buffer = b''
                    async for line in response.content.iter_any():
                        buffer += line
                        while b'\0' in buffer:
                            chunk, buffer = buffer.split(b'\0', 1)
                            if chunk:
                                data = json.loads(chunk.decode())
                                yield data
        except Exception as ee:
            logger.error("llm stream error: {}".format(ee))
            # Fallback attempt with the (locally shadowed) default model.
            payload = {
                "model": default_llm,
                "prompt": self.model_prompt,
                "temperature": 0.9,
                "max_new_tokens": 1024,
                'repetition_penalty': 0.5,
                # 'top_p': 0.9,
                # 'top_k': 1,
                "stop": '</s>',
                "stop_token_ids": None,
                "echo": False,
            }
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + llm_host + ":21001" + "/worker_generate_stream",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=100)) as response:

                    # An empty delimiter is not supported by aiohttp, so we'll read chunk by chunk
                    buffer = b''
                    async for line in response.content.iter_any():
                        buffer += line
                        while b'\0' in buffer:
                            chunk, buffer = buffer.split(b'\0', 1)
                            if chunk:
                                data = json.loads(chunk.decode())
                                yield data


    async def achat_qwen_llm(self, model_name="Qwen-7B-Chat"):
        """Drain the Qwen stream and return the final chunk's text.

        NOTE(review): same potential NameError as achat_self_llm when the
        stream yields nothing -- confirm.
        """
        async for x in self.achat_qwen_llm_stream(model_name):
            # logger.info("the llm response is :{}".format(x["text"]))
            pass

        return x["text"]

    async def achat_qwen_llm_stream(self, model_name="Qwen-7B-Chat"):
        """Async generator: stream completions from a Qwen FastChat worker.

        Same NUL-delimited protocol as achat_self_llm_stream, with Qwen stop
        tokens. NOTE(review): the local ``default_llm`` below is assigned
        but never used -- the fallback payload reuses ``model_name`` -- so
        the retry hits the same model; confirm whether the fallback was
        meant to use it.
        """
        logger.info("the qwen llm prompt is :{}".format(self.prompt))
        default_llm = "Qwen-7B-Chat"
        payload = {
            "model": model_name,
            "prompt": self.model_prompt,
            "temperature": 0.9,
            "max_new_tokens": 1024,
            'repetition_penalty': 0.5,
            # 'top_p': 0.9,
            # 'top_k': 1,
            "sep": "<|im_end|>",
            "stop_token_ids": [
                151643,
                151644,
                151645,
            ],  # "<|endoftext|>", "<|im_start|>", "<|im_end|>"
            "stop_str": "<|endoftext|>",
            "echo": False,
        }
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + llm_host + ":21001" + "/worker_generate_stream",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=100)) as response:

                    # An empty delimiter is not supported by aiohttp, so we'll read chunk by chunk
                    buffer = b''
                    async for line in response.content.iter_any():
                        buffer += line
                        while b'\0' in buffer:
                            chunk, buffer = buffer.split(b'\0', 1)
                            if chunk:
                                data = json.loads(chunk.decode())
                                yield data
        except Exception as ee:
            logger.error("llm stream error: {}".format(ee))
            payload = {
            "model": model_name,
            "prompt": self.model_prompt,
            "temperature": 0.9,
            "max_new_tokens": 1024,
            'repetition_penalty': 0.5,
            # 'top_p': 0.9,
            # 'top_k': 1,
            "sep": "<|im_end|>",
            "stop_token_ids": [
                151643,
                151644,
                151645,
            ],  # "<|endoftext|>", "<|im_start|>", "<|im_end|>"
            "stop_str": "<|endoftext|>",
            "echo": False,
        }
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + llm_host + ":21001" + "/worker_generate_stream",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=100)) as response:

                    # An empty delimiter is not supported by aiohttp, so we'll read chunk by chunk
                    buffer = b''
                    async for line in response.content.iter_any():
                        buffer += line
                        while b'\0' in buffer:
                            chunk, buffer = buffer.split(b'\0', 1)
                            if chunk:
                                data = json.loads(chunk.decode())
                                yield data


    async def achat_auto_llm(self, is_stream=False, type="auto"):
        """Dispatch to a backend by name ("auto" resolves the module default).

        Stream variants return the async generator (not awaited); blocking
        variants await and return the text. Unknown names fall through to the
        self-hosted worker with ``type`` as the model name.
        NOTE(review): the ``type`` parameter shadows the builtin; kept as-is
        because it is part of the public interface.
        """
        if type == "auto":
            type = default_llm
        if type == "self" and is_stream:
            return self.achat_self_llm_stream()
        elif type == "self" and not is_stream:
            return await self.achat_self_llm()
        elif type == 'qwen' and is_stream:
            return self.achat_qwen_llm_stream()
        elif type == "qwen" and not is_stream:
            return await self.achat_qwen_llm()
        elif type == "gpt" and is_stream:
            return self.achat_with_proxy_stream_gpt4()
        elif type == "gpt" and not is_stream:
            return await self.achat_with_proxy_gpt4()
        elif type == "ernie" and not is_stream:
            return await self.achat_with_proxy_ernie()
        elif type == "baichuan_planner" and is_stream:
            return self.achat_self_llm_stream("baichuan_planner")
        elif type == "baichuan_planner" and not is_stream:
            return await self.achat_self_llm("baichuan_planner")
        elif is_stream:
            return self.achat_self_llm_stream(type)
        else:
            return await self.achat_self_llm(type)


if __name__ == "__main__":
    import asyncio

    # Manual smoke test: run the situation-recognition prompt against a named
    # self-hosted model and log the raw completion.
    content = """
作为专业的、非常懂营销的信贷领域的销售员，在电话对话中，你的任务是根据与用户的对话来识别和判断用户当前的借款情境。用户情境列表可以包括以下几种：\n          1. 有无APP：未知,未安装APP,有京东APP,有京东金融APP。\n          2. 当前提额状态：判断用户去金条提额入口"提升额度"的状态:还未提额,提额成功,提额失败。\n          3. 当前借款状态：判断用户金条借款的状态，还未借款,审核中,到账途中,借款成功,借款失败,无法借款。\n          4. 当前优惠券状态：判断用户金条优惠券的状态:已获得优惠券,已使用优惠券。还款券不要识别为这个情境。\n          5. 当前降息状态：判断用户金条降息的状态:已获得降息,已使用降息。\n          6. 当前优惠券状态：判断用户金条账户的优惠券状态之一:想要优惠券,无优惠券,已获得优惠券,已使用优惠券。\n          7. 是否打开了APP：判断用户是否跟随销售员的指引打开了手机APP：未打开,已打开。默认情况是-未打开\n          8. 是否已经查看到了自己的额度：判断用户是否跟随销售员的指引打开手机APP并且查看了自己的额度:未查看,已查看。默认情况是-未查看\n\n\n    注意事项:\n          1.输出格式：key：value格式。key为情境，value为情境的描述。情境和描述之间用"："分割，同一个情境的多个描述之间用"、"分割，多个情境之间用"｜"分割\n          2.用户情境需要根据对话记录和用户情境列表来识别，对话记录中未提及的情境，不要在结果输出。对话记录中识别出的信息及状态均若不在用户情境中的情况，也不要输出。如果没有识别到任何情境，请直接返回"无"。\n          3.请根据销售员与用户的对话记录，总结用户当前的情境，不需要回答用户的具体问题。\n          4."额度是否提取"表示"当前借款状态"，而不是"当前提额状态"。\n          5.识别和判断的依据主要是用户在对话记录中明确表述出来的，一步一步分析，不要无中生有。\n          6.所有信息必须是用户明确表述出来的，一般情况下，一个信息点只输出一个结果即可\n    对话记录如下：\n    ===\n    销售员:您好，请问是王女士吗？\n用户:是的\n销售员:您好，我是京东金融的客户经理，工号95266，给您致电是金条对部分优质用户做了利息临时下调，像日常消费，旅游装修都可以用，具体以系统审核为准，您有留意到么？\n用户:没注意，怎么了？\n\n    ===\n    根据对话记录和用户情境列表，一步一步推理出的用户情境（格式为key：value格式）：
    """
    # Collapse the triple-space indentation artifacts inside the prompt text.
    content = content.replace("   ", "")
    for _ in range(1):
        agent = LLMAgent(content)
        logger.info("start")
        result = asyncio.run(agent.achat_auto_llm(type="Colossal-7B-financial-1019"))
        # Alternative backend for manual testing:
        # result = asyncio.run(agent.async_chat_with_openai())
        # for data in result:
        #     logger.info(data)
        logger.info(result)