# -*- coding:utf-8 -*-
import time

# @Time    : 2023/5/10 14:30
# @Author  : zengwenjia
# @Email   : zengwenjia@lingxi.ai
# @Software: LLM_internal

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

import requests
import json

from agent.llm_agent_parent import LLMAgentParent
from bot.chatgpt.chat_gpt_bot import ChatGPTBot
# import config
from common.log import logger
import aiohttp
import asyncio
import itertools
import random
import uuid
from util_tool import utils
import os
import datetime
import threading
import settings

from internal_server.bot.insurance_planner_gpt.context_local_util import local_data

# Select LLM worker hosts and the default backend per deployment environment.
if settings.ONLINE:
    llm_host = "172.30.0.22"
    llm_host_test = '172.26.4.253'
    default_llm = "self"
else:
    llm_host = "socket.chatlingxi.com"
    llm_host_test = 'socket.chatlingxi.com'
    # NOTE(review): the two assignments below immediately overwrite the
    # domain-based hosts above, making them dead code — presumably a
    # temporary switch to raw IPs; confirm which pair is intended.
    llm_host = "116.196.97.15"
    llm_host_test = '8.142.69.133'
    default_llm = "self"
logger.info('llm host:{}'.format(llm_host))
# config.load_config()
import copy

# Module-level lock (not used elsewhere in this file) and a shared asyncio
# event loop, used by LLMAgent for fire-and-forget run_in_executor calls.
lock = threading.Lock()
try:
    loop = asyncio.get_event_loop()
except RuntimeError:
    # No event loop exists in this thread (e.g. imported from a worker
    # thread): create and install one, then fetch it.
    asyncio.set_event_loop(asyncio.new_event_loop())
    loop = asyncio.get_event_loop()

# SECURITY NOTE(review): all API keys / Azure secrets below are hard-coded
# and committed to source control. They should be moved to environment
# variables or a secret manager, and the exposed values rotated.
keys = [

    ("https://api.chatanywhere.com.cn/", "sk-fPabEZoZhYONpJKebMh8hO2Cf4BtcmVIzzoNtMrhN4a4PN16"),
    ("https://api.chatanywhere.com.cn/", "sk-hPsGy4D9kGkV2APuR8OdOUkYQr5XQjSr7mY2SADDJ95ABP0p"),
    ("https://api.chatanywhere.com.cn/", "sk-ENeLI2bhHCydqg4ErWoq6teG4Hu3KzmwCoyoZmdG9D5HbfxY"),
    ("https://api.chatanywhere.com.cn/", "sk-sU1EowMgsVd6Or6RmNzuJQNdmQdCl26i45HRk8akiS7RSNy8"),
    ("https://api.chatanywhere.com.cn/", "sk-9Lp9W6nuFsmfFsCGNaIpXNRi8KSI4qzSUGmGVG0zxTRgLHJU"),
]

# (base_url, api_key) tuples for OpenAI-compatible proxy calls; disabled
# entries are kept commented for quick rotation.
train_keys = [
    # ("https://api.chatanywhere.com.cn/", "sk-wWZiRMreyhtnwGm6tckAsSb3hJR5YYmVyFQTRzKTWNwQZmXi"),
    # ("https://api.chatanywhere.com.cn/", "sk-pRq1Rn0OiOo2yM8rK2Dt1FhQl4uoWkzku8dDuJ2Y2FF8mIXo"),
    # ("https://api.chatanywhere.com.cn/", "sk-Wf1bsQQ1jYX1koWw7uUCH5HSDWJUi6OR8Xx8YXmWyVCv8sk0"),
    # ("https://api.chatanywhere.com.cn/", "sk-BWhs8Hn3bz2C9eqlk2ZjI6oXC4n33whMPWrFXis4suryxV77"),
    # ("https://api.chatanywhere.com.cn/", "sk-xuqalfD6UNQbkb0RfVUjNbXbd1OosvOrc7R2uvud42gTPibN"),
    # ("https://api.chatanywhere.com.cn/", "sk-mAk7ME8MbmYt4zs76cKGIKjxsgFHEgPyejGJjfDf3hOBoGLP"),
    ("https://api.chatanywhere.com.cn/", "sk-zM6x5IUg0q6u3gtGc4WLxlkiwrKnumqS83JiWVDsXRaW1PLj"),
    ("https://api.chatanywhere.com.cn/", "sk-Vf6Y5ZrW7nDCzEs7bv3a0jJ4RUGE1Z4fly6hJLqPok1PlEBF"),
    # ("https://api.chatanywhere.com.cn/", "sk-CfQl1kVdPM13JiX6q6l9XAcM3O6aLhkNz2A0OBq0pusEHNck"),
    # ("https://api.chatanywhere.com.cn/", "sk-FdXROLxHzyxMhKXY5QzhLwh4IRlTTEL4x7sPk9gCCmbicOdu"),
    # ("https://api.chatanywhere.com.cn/", "sk-SExov0P7oCH9QaWPND3aa7W7hN8M4ZDsmgFkVFTczJDvUZTs"),
    ("https://api.chatanywhere.com.cn/", "sk-znEgLNyNdYShdo81hsXwGljGmD0AONv4iMBHbxZqbBVkwxWV"),
    # ("https://api.chatanywhere.com.cn/", "sk-njfT1tzFaEaUrxCoUaYqz66rRSLlJKW5fZU5WM7n2OLtKnke"),
    # ("https://api.chatanywhere.com.cn/", "sk-5AwmTKGWaUEARvD3xWzihqDDey3HrTYBFTXMFxRvd26HUlaG"),
    ("https://api.chatanywhere.com.cn/", "sk-tpXAB0WyMjwhxhkpXqPQy38HsGoU13xmQFLT8XFFsPOyzH5A"),
    # ("https://api.chatanywhere.com.cn/", "sk-lM4eNRqXXi34cniqBRZ2Kq5Os3ZMPIikFeIyrg1ISmzdF5po"),
    ("https://api.chatanywhere.com.cn/", "sk-kMS9VEWCE5eRvLJpk7QyjYQdxJ7OJnG3opu4VMYgEYBvUTNC"),
    # ("https://api.chatanywhere.com.cn/", "sk-e4fhhETc254w2MOVmjLJYR8cvevYpsvupaKL2JJQE7msCmqI"),
    # ("https://api.chatanywhere.com.cn/", "sk-hCgQFaAB1BHA3NX4ocybDoNWbLsUr9RlCvfyP715s0fIi56s"),
    # ("https://api.chatanywhere.com.cn/", "sk-7GHAuvERby2d1HbcObyPriyQNlpK20LecH17wfVezux2G9cs"),
]

# (endpoint_url, deployment_name, api_key) tuples for Azure OpenAI.
azure_keys = [
    # ("https://lx-gpt-australia-east.openai.azure.com/", "lx-gpt-4-ae", "f31f66c8601d488eac461ade81c98da1"),
    # ("https://lx-gpt-canada-east.openai.azure.com/", "lx-gpt-4-ce", "6349021ec7fa47a7b526e92bf638ccf3"),
    # ("https://lx-gpt-east-us2.openai.azure.com/", "lx-gpt-4-eu2", "aefe46a980ba4a5bb24845f5618b05ce"),
    # ("https://lx-gpt-france-central.openai.azure.com/", "lx-gpt-4-fc", "21b9bf1adb5b48c2a6d177a02e0c0816"),
    # ("https://lx-gpt-norway-east.openai.azure.com/", "lx-gpt-4-ne", "ebbb7c0d824f437394f3ccdf56bdaf64"),
    ("https://lx-gpt-south-india.openai.azure.com/", "lx-gpt-4-sl", "42a1c23922394c559a335e9786e5b358"),
    # ("https://lx-gpt-sweden-central.openai.azure.com/", "lx-gpt-4-sc", "7b175824ea6c42e8a07c8456704bb791"),
    # ("https://lx-gpt-uk-south.openai.azure.com/", "lx-gpt-4-us", "b896d019f5964300a93b821351ac9017"),
    ("https://lx-gpt-west-u.openai.azure.com/", "lx-gpt-4-wu", "340e512ddc7848bf814570dccd8ec149"),
]


# NOTE(review): this overrides the first ``keys`` list entirely, making it
# dead data — only ``train_keys`` is ever used for proxy calls.
keys = train_keys

backup_keys = [("https://api.chatlingxi.com", "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")]
random.shuffle(keys)

# Round-robin iterators so successive requests rotate across keys.
data_cycle = itertools.cycle(keys)
backup_data_cycle = itertools.cycle(backup_keys)
azure_data_cycle = itertools.cycle(azure_keys)

# Switch the module-wide default LLM backend via a function call.
def change_default_llm(llm_model):
    """Set the module-level ``default_llm`` used by ``achat_auto_llm``."""
    global default_llm
    default_llm = llm_model


def get_access_token():
    """Fetch a Baidu AIP (ERNIE) OAuth access token.

    Returns the ``access_token`` field of the JSON response, or ``None``
    when the field is absent.

    SECURITY NOTE(review): the client_id/client_secret are hard-coded in
    the URL — move them to configuration and rotate the exposed values.
    """

    url = "https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=X8TlvTouC1rUTiMNxaaOlgoG&client_secret=7fA2Mzo1Lc5orBoK2oKglkmRu9RSAhe5"

    # The API expects a JSON body; an empty JSON string is sufficient here.
    payload = json.dumps("")
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json'
    }

    # Fix: requests has no default timeout, and this function runs at module
    # import time (see the module-level call below) — without a timeout a
    # stalled connection would hang the import forever.
    response = requests.request("POST", url, headers=headers, data=payload, timeout=30)
    return response.json().get("access_token")


ernie_token = get_access_token()


class LLMAgent(LLMAgentParent):
    """Chat agent that dispatches a prompt to one of several LLM backends:

    * OpenAI-compatible proxy endpoints, key-rotated via ``data_cycle``;
    * Azure OpenAI deployments, rotated via ``azure_data_cycle``;
    * self-hosted FastChat workers (``llm_host`` / ``llm_host_test`` / raw IPs).

    Most methods retry once on failure and can persist the prompt/response
    pair for training via ``save_train_data`` (scheduled on the module-level
    ``loop`` executor).
    """

    def __init__(self, prompt, gpt_prompt_content=None):
        """Prepare both prompt formats.

        :param prompt: the user prompt; doubles as the GPT system prompt when
            ``gpt_prompt_content`` is not given.
        :param gpt_prompt_content: optional dedicated system prompt for the
            chat-completion endpoints.
        """
        super().__init__(prompt, gpt_prompt_content)
        if gpt_prompt_content is None:
            self.gpt_prompt_content = prompt
        else:
            self.gpt_prompt_content = gpt_prompt_content
        # Chat-completion backends (OpenAI/Azure) take a messages list...
        self.gpt_prompt = [
            {"role": "system", "content": self.gpt_prompt_content},
        ]
        # ...while FastChat workers take one flat completion prompt in the
        # trained USER/ASSISTANT template (the Chinese prefix is part of the
        # template and must stay verbatim).
        self.model_prompt = "你是一个专业的助手，你需要基于用户的问题，给出准确的答案。USER: " + self.prompt + " ASSISTANT:"

    async def achat_with_proxy(self, retry_count=0):
        """Call gpt-3.5-turbo through a rotated proxy key; retry once on error.

        Returns the completion text, or "" if both attempts fail.
        """
        logger.info("the gpt prompt is: {}".format(self.gpt_prompt))
        result = ""
        key_tuple = next(data_cycle)
        key = key_tuple[1]
        try:
            headers = {
                "Content-Type": "application/json",
                "Authorization": "Bearer " + key,
            }
            payload = {
                "model": "gpt-3.5-turbo",
                "messages": self.gpt_prompt,
                "temperature": 0.9,
                # "stream": True,
            }
            async with aiohttp.ClientSession() as session:
                response = await session.post(key_tuple[0] + "/v1/chat/completions",
                                              headers=headers, json=payload,
                                              timeout=aiohttp.ClientTimeout(total=100))
                content = await response.content.read()
            result = json.loads(content.decode())["choices"][0]['message']['content']
        except Exception as ee:
            logger.error("gpt proxy error: {}".format(ee))
            if retry_count < 1:
                return await self.achat_with_proxy(retry_count + 1)

        return result

    async def achat_with_proxy_gpt4(self, retry_count=0, save_data=True, save_path="llm"):
        """Call GPT-4 (preview) through a rotated proxy key; retry once.

        :param save_data: when True, persist prompt/result via save_train_data.
        :param save_path: sub-path passed to save_train_data.
        Returns the completion text, or "" if both attempts fail.
        """
        # logger.info("the gpt prompt is: {}".format(self.gpt_prompt))
        logger.info("the gpt prompt is: {}".format(self.gpt_prompt[0].get('content')))
        result = ""
        key_tuple = next(data_cycle)
        key = key_tuple[1]
        content = ""
        model_type = "gpt-4-1106-preview"
        # NOTE(review): the retry branch assigns the same model, so it is
        # currently a no-op — presumably a fallback model was intended here.
        if retry_count > 0:
            model_type = "gpt-4-1106-preview"
        try:
            headers = {
                "Content-Type": "application/json",
                "Authorization": "Bearer " + key
            }
            payload = {
                "model": model_type,
                "messages": self.gpt_prompt,
                "temperature": 0.9,
                "max_tokens": 1024,
                # "request_timeout": 30
                # "stream": True,
            }
            async with aiohttp.ClientSession() as session:
                response = await session.post(key_tuple[0] + "/v1/chat/completions",
                                              headers=headers, json=payload,
                                              timeout=aiohttp.ClientTimeout(total=100))
                content = await response.content.read()
            result = json.loads(content.decode())["choices"][0]['message']['content']

            if save_data:
                # Fire-and-forget persistence on the shared executor loop.
                loop.run_in_executor(None, self.save_train_data, self.prompt, result,
                                     self.__class__.__name__, save_path, )


        except Exception as ee:
            logger.error("the key is: {}".format(key))
            if content:
                logger.error("receive message is:" + content.decode())
            logger.exception("gpt proxy error: {}".format(ee))
            if retry_count < 1:
                return await self.achat_with_proxy_gpt4(retry_count + 1)

        return result

    def parse_gpt_stream_response(self, response):
        """Parse one SSE line (bytes) from an OpenAI-style stream.

        Returns a dict chunk, a generator over chunks when the payload is a
        list, or None for empty lines / the ``data: [DONE]`` terminator.
        Raises for payloads that are neither list nor dict.
        """
        _line = None
        if response.strip() == b"data: [DONE]":
            response = None
        if response and response.strip():
            # Strip the SSE "data: " prefix before JSON-decoding.
            if response.startswith(b"data: "):
                response = response[len(b"data: "):]
            _line = response.decode("utf-8")

        if _line is not None:
            line_json = json.loads(_line)
            if isinstance(line_json, list):
                return (line_content for line_content in line_json)
            elif isinstance(line_json, dict):
                return line_json
            else:
                raise Exception("gpt stream error: {}".format(_line))

    async def achat_with_proxy_stream_gpt4(self):
        """Stream GPT-4 chunks through a rotated proxy key.

        Yields parsed chunk dicts; on failure, rotates to the next key and
        retries once with the gpt-4-0314 model.
        """
        logger.info("the gpt prompt is: {}".format(self.gpt_prompt))
        key_tuple = next(data_cycle)
        key = key_tuple[1]
        headers = {
            "Content-Type": "application/json",
            "Authorization": "Bearer " + key
        }
        payload = {
            "model": "gpt-4",
            "messages": self.gpt_prompt,
            "stream": True,
            "temperature": 0.9,
            "max_tokens": 2048,
        }
        try:
            content = ""

            async with aiohttp.ClientSession() as session:
                async with session.post(key_tuple[0] + "/v1/chat/completions",
                                        headers=headers, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=100)) as response:
                    async for line in response.content:
                        result = self.parse_gpt_stream_response(line)
                        if result:
                            yield result



        except Exception as ee:
            # Log which key failed before rotating to the next one.
            logger.error("the key is: {}".format(key))
            logger.exception("gpt4 stream error: {}".format(ee))
            try:
                key_tuple = next(data_cycle)
                key = key_tuple[1]
                headers = {
                    "Content-Type": "application/json",
                    "Authorization": "Bearer " + key
                }
                payload = {
                    "model": "gpt-4-0314",
                    "messages": self.gpt_prompt,
                    "stream": True,
                    "temperature": 0.9,
                    "max_tokens": 2048,
                }
                async with aiohttp.ClientSession() as session:
                    async with session.post(key_tuple[0] + "/v1/chat/completions",
                                            headers=headers, json=payload,
                                            timeout=aiohttp.ClientTimeout(total=100)) as response:
                        async for line in response.content:
                            result = self.parse_gpt_stream_response(line)
                            if result:
                                yield result
            except Exception as ee:
                logger.error("the key is: {}".format(key))
                logger.exception("gpt4-0314 stream again error: {}".format(ee))

    async def achat_self_llm(self, model_name="wechat_output_0319", retry_count=0, save_data=True, save_path="llm"):
        """Call a self-hosted FastChat worker (non-streaming); retry once.

        Returns the stripped completion text, or "" if both attempts fail.
        """
        from internal_server.bot.insurance_planner_gpt import context_local_util
        try:
            session_id = context_local_util.session_id
            message_id = context_local_util.message_id
        except Exception:
            # Context-local ids may be unset outside a request context.
            session_id = ""
            message_id = ""

        logger.info('message_id:{}'.format(message_id))

        payload = {
            "model": model_name,
            "prompt": self.model_prompt,
            "temperature": 0.3,
            "max_new_tokens": 128,
            "stop": '</s>',
            "stop_token_ids": None,
            "echo": False,
        }
        # Fix: initialize before the try-block — previously an exception
        # raised before assignment made the except handler crash with
        # NameError on ``content`` and the final return crash with
        # UnboundLocalError on ``result``.
        result = ""
        content = b""
        try:
            # start_time = time.time()
            # logger.info("the llm prompt is :{}".format(self.prompt))
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + '116.196.71.48' + ":22809" + "/worker_generate",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=10)) as response:
                    content = await response.content.read()
            result = str(json.loads(content.decode())["text"]).strip()

            if save_data:
                # Fire-and-forget persistence on the shared executor loop.
                loop.run_in_executor(None, self.save_train_data, self.prompt, result,
                                     self.__class__.__name__, save_path, model_name,
                                     message_id)  # session_id + ":" +
            # logger.info("the llm prompt spend is :{}".format(time.time() - start_time))
        except Exception as ee:
            logger.error("llm stream error: {}".format(ee))
            if content:
                logger.error("receive message is:" + content.decode())
            logger.exception("llm proxy error: {}".format(ee))
            if retry_count < 1:
                return await self.achat_self_llm(model_name, retry_count + 1)

        return result

    async def achat_self_llm_financial_loan_7b_b(self, model_name="wechat_output", retry_count=0):
        """Call the financial-loan 7B FastChat worker (non-streaming); retry once.

        Returns the completion text, or "" if both attempts fail.
        """
        payload = {
            "model": model_name,
            "prompt": self.model_prompt,
            "temperature": 0.3,
            "max_new_tokens": 128,
            "stop": '</s>',
            "stop_token_ids": None,
            "echo": False,
        }
        # Fix: initialize before the try-block (same unbound-variable defect
        # as achat_self_llm: the except handler read ``content`` and the
        # return read ``result`` before assignment on early failure).
        result = ""
        content = b""
        try:
            # start_time = time.time()
            # logger.info("the llm prompt is :{}".format(self.prompt))
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + '116.198.37.30' + ":22809" + "/worker_generate",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=10)) as response:
                    content = await response.content.read()
            result = json.loads(content.decode())["text"]
            # logger.info("the llm prompt spend is :{}".format(time.time() - start_time))
        except Exception as ee:
            logger.error("llm stream error: {}".format(ee))
            if content:
                logger.error("receive message is:" + content.decode())
            logger.exception("llm proxy error: {}".format(ee))
            if retry_count < 1:
                return await self.achat_self_llm_financial_loan_7b_b(model_name, retry_count + 1)

        return result

    async def achat_self_llm_test(self, model_name):
        """Drain the test-worker stream and return the final chunk's text."""
        result = ""
        # Fix: initialize x so an empty stream does not leave the loop
        # variable unbound (previously UnboundLocalError at the check below).
        x = None
        async for x in self.achat_self_llm_test_stream(model_name):
            # logger.info("the llm response is :{}".format(x["text"]))
            pass

        if x and 'text' in x:
            result = x["text"]
        return result

    async def achat_self_llm_test_stream(self, model_name):
        """Stream NUL-delimited JSON chunks from the test FastChat worker.

        On any error, retries the whole request once (errors in the retry
        propagate to the caller).
        """
        logger.info("the llm prompt is :{}".format(self.prompt))
        payload = {
            "model": model_name,
            "prompt": self.model_prompt,
            "temperature": 0.3,
            "max_new_tokens": 128,
            "stop": '</s>',
            "stop_token_ids": None,
            "echo": False,
        }
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + llm_host_test + ":21001" + "/worker_generate_stream",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=10)) as response:

                    # An empty delimiter is not supported by aiohttp, so we'll read chunk by chunk
                    buffer = b''
                    async for line in response.content.iter_any():
                        buffer += line
                        while b'\0' in buffer:
                            chunk, buffer = buffer.split(b'\0', 1)
                            if chunk:
                                data = json.loads(chunk.decode())
                                yield data
        except Exception as ee:
            logger.error("llm stream error: {}".format(ee))
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + llm_host_test + ":21001" + "/worker_generate_stream",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=10)) as response:

                    # An empty delimiter is not supported by aiohttp, so we'll read chunk by chunk
                    buffer = b''
                    async for line in response.content.iter_any():
                        buffer += line
                        while b'\0' in buffer:
                            chunk, buffer = buffer.split(b'\0', 1)
                            if chunk:
                                data = json.loads(chunk.decode())
                                yield data

    async def achat_self_llm_financial_first_loan(self, model_name):
        """Drain the first-loan worker stream and return the final chunk's text."""
        result = ""
        # Fix: initialize x so an empty stream does not leave the loop
        # variable unbound (previously UnboundLocalError at the check below).
        x = None
        async for x in self.achat_self_llm_financial_first_loan_stream(model_name):
            # logger.info("the llm response is :{}".format(x["text"]))
            pass

        if x and 'text' in x:
            result = x["text"]
        return result

    async def achat_self_llm_financial_first_loan_stream(self, model_name):
        """Stream NUL-delimited JSON chunks from the first-loan FastChat worker.

        On any error, retries the whole request once (errors in the retry
        propagate to the caller).
        """
        logger.info("the llm prompt is :{}".format(self.prompt))
        payload = {
            "model": model_name,
            "prompt": self.model_prompt,
            "temperature": 0.3,
            "max_new_tokens": 128,
            "stop": '</s>',
            "stop_token_ids": None,
            "echo": False,
        }
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + llm_host + ":11001" + "/worker_generate_stream",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=10)) as response:

                    # An empty delimiter is not supported by aiohttp, so we'll read chunk by chunk
                    buffer = b''
                    async for line in response.content.iter_any():
                        buffer += line
                        while b'\0' in buffer:
                            chunk, buffer = buffer.split(b'\0', 1)
                            if chunk:
                                data = json.loads(chunk.decode())
                                yield data
        except Exception as ee:
            logger.error("llm stream error: {}".format(ee))
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + llm_host + ":11001" + "/worker_generate_stream",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=10)) as response:

                    # An empty delimiter is not supported by aiohttp, so we'll read chunk by chunk
                    buffer = b''
                    async for line in response.content.iter_any():
                        buffer += line
                        while b'\0' in buffer:
                            chunk, buffer = buffer.split(b'\0', 1)
                            if chunk:
                                data = json.loads(chunk.decode())
                                yield data

    async def achat_self_llm_stream(self, model_name="planner_llama-2_13b_new"):
        """Stream NUL-delimited JSON chunks from the main FastChat worker.

        On error, retries once with ``default_llm`` as the model name.
        """
        # logger.info("the llm prompt is :{}".format(self.prompt))
        payload = {
            "model": model_name,
            "prompt": self.model_prompt,
            "temperature": 0.3,
            "max_new_tokens": 128,
            # 'top_p': 0.95,
            # "do_sample": False,
            # "best_of": 5,
            # "use_beam_search": True,
            # 'top_k': 1,
            "stop": '</s>',
            "stop_token_ids": None,
            "echo": False,
        }
        try:
            start_time = time.time()
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + llm_host + ":21001" + "/worker_generate_stream",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=10)) as response:

                    # An empty delimiter is not supported by aiohttp, so we'll read chunk by chunk
                    buffer = b''
                    async for line in response.content.iter_any():
                        buffer += line
                        while b'\0' in buffer:
                            chunk, buffer = buffer.split(b'\0', 1)
                            if chunk:
                                data = json.loads(chunk.decode())
                                yield data
            # logger.info("the llm prompt is :{}".format(self.prompt))

        except Exception as ee:
            logger.error("llm stream error: {}".format(ee))
            # Retry once, falling back to the module default model.
            payload = {
                "model": default_llm,
                "prompt": self.model_prompt,
                "temperature": 0.3,
                "max_new_tokens": 128,
                # "do_sample": False,
                # 'top_p': 0.95,
                # 'top_k': 1,
                "stop": '</s>',
                "stop_token_ids": None,
                "echo": False,
            }
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + llm_host + ":21001" + "/worker_generate_stream",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=10)) as response:

                    # An empty delimiter is not supported by aiohttp, so we'll read chunk by chunk
                    buffer = b''
                    async for line in response.content.iter_any():
                        buffer += line
                        while b'\0' in buffer:
                            chunk, buffer = buffer.split(b'\0', 1)
                            if chunk:
                                data = json.loads(chunk.decode())
                                yield data


    async def achat_with_azure_gpt4(self, retry_count=0, save_data=True, save_path="llm"):
        """Call an Azure OpenAI GPT-4 deployment (rotated); retry once.

        Returns the completion text, or "" if both attempts fail.
        """
        try:
            session_id = local_data.session_id
            message_id = local_data.message_id
        except Exception:
            # Context-local ids may be unset outside a request context.
            session_id = ""
            message_id = ""

        result = ""
        key_tuple = next(azure_data_cycle)
        url = key_tuple[0]
        model = key_tuple[1]
        key = key_tuple[2]
        content = ""
        try:
            headers = {
                "Content-Type": "application/json",
                "api-key": key
            }
            payload = {
                "messages": self.gpt_prompt,
                "temperature": 0.9,
                "max_tokens": 1024,
                "model": "gpt-4-1106-preview",
                # "request_timeout": 30
                # "stream": True,
            }
            async with aiohttp.ClientSession() as session:
                response = await session.post(
                    url + "openai/deployments/" + model + "/chat/completions?api-version=2023-05-15",
                    headers=headers, json=payload,
                    # timeout=aiohttp.ClientTimeout(total=100)
                )
                content = await response.content.read()
            result = json.loads(content.decode())["choices"][0]['message']['content']

            if save_data:
                # Fire-and-forget persistence on the shared executor loop.
                loop.run_in_executor(None, self.save_train_data, self.prompt, result,
                                     self.__class__.__name__, save_path, "azure-gpt-4",
                                     message_id) # session_id + ":" +

        except Exception as ee:
            logger.error("the azure key is: {}".format(key))
            if content:
                logger.error("receive message is:" + content.decode())
            logger.exception("azure gpt error: {}".format(ee))
            if retry_count < 1:
                return await self.achat_with_azure_gpt4(retry_count + 1)
        # logger.info(
        #     f"当前会话:{session_id},当前任务：{self.__class__.__name__} \n the gpt prompt is: {self.gpt_prompt[0].get('content')}\n{result}")
        return result

    async def achat_with_azure_stream_gpt4(self, save_data=True, save_path="llm"):
        """Stream GPT-4 chunks from an Azure OpenAI deployment.

        Yields parsed chunk dicts; on failure rotates to the next deployment
        and retries once.
        """
        key_tuple = next(azure_data_cycle)
        url = key_tuple[0]
        model = key_tuple[1]
        key = key_tuple[2]
        headers = {
            "Content-Type": "application/json",
            "api-key": key
        }
        payload = {
            "messages": self.gpt_prompt,
            "stream": True,
            "model": "gpt-4-1106-preview",
            "temperature": 0.9,
            "max_tokens": 2048,
        }
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post(
                        url + "openai/deployments/" + model + "/chat/completions?api-version=2023-05-15",
                        headers=headers, json=payload) as response: #,timeout=aiohttp.ClientTimeout(total=500)
                    async for line in response.content:
                        result = self.parse_gpt_stream_response(line)
                        if result:
                            yield result
        except Exception as ee:
            # Log which key/deployment failed before rotating to the next one.
            logger.error("the azure key is: {}".format(key))
            logger.exception("azure gpt4 stream error: {}".format(ee))
            try:
                key_tuple = next(azure_data_cycle)
                url = key_tuple[0]
                model = key_tuple[1]
                key = key_tuple[2]
                headers = {
                    "Content-Type": "application/json",
                    "api-key": key
                }
                # NOTE(review): unlike the first attempt, this retry payload
                # omits the "model" key — harmless for Azure (the deployment
                # in the URL selects the model) but confirm it is intended.
                payload = {
                    "messages": self.gpt_prompt,
                    "stream": True,
                    "temperature": 0.9,
                    "max_tokens": 2048,
                }
                async with aiohttp.ClientSession() as session:
                    async with session.post(
                            url + "openai/deployments/" + model + "/chat/completions?api-version=2023-05-15",
                            headers=headers, json=payload) as response: #,timeout=aiohttp.ClientTimeout(total=500)
                        async for line in response.content:
                            result = self.parse_gpt_stream_response(line)
                            if result:
                                yield result
            except Exception as ee:
                logger.error("the azure key is: {}".format(key))
                logger.exception("azure gpt4 stream again error: {}".format(ee))


    async def achat_auto_llm(self, is_stream=False, type="auto"):
        """Route to the appropriate backend based on ``type`` and streaming.

        :param is_stream: when True, return an async generator instead of
            awaiting the full completion.
        :param type: backend selector ("auto" resolves to ``default_llm``);
            the name shadows the builtin but is kept for keyword-arg callers.
        """
        if type == "auto":
            type = default_llm
        if type == "self" and is_stream:
            return self.achat_self_llm_stream()
        elif type == "self" and not is_stream:
            return await self.achat_self_llm()
        elif type == "gpt" and is_stream:
            # return self.achat_with_proxy_stream_gpt4()
            return self.achat_with_azure_stream_gpt4()
        elif type == "gpt" and not is_stream:
            # return await self.achat_with_proxy_gpt4()
            return await self.achat_with_azure_gpt4()
        elif type == 'llm_test':
            return await self.achat_self_llm_test(type)
        elif type == 'financial_first_loan':
            return await self.achat_self_llm_financial_first_loan(type)
        elif type == 'financial_loan_7b_b':
            return await self.achat_self_llm_financial_loan_7b_b(type)
        elif is_stream:
            return self.achat_self_llm_stream(type)
        else:
            return await self.achat_self_llm(type)


if __name__ == "__main__":
    import asyncio

    # Ad-hoc smoke test: summarize the user's current issue from a sales-call
    # transcript. The prompt below is Chinese domain data consumed at runtime
    # and is intentionally left verbatim (including trailing spaces, which
    # the replace() call below depends on).
    content = """
    作为一个专业的、非常懂营销的信贷领域的销售员，你正通过电话和用户沟通，你需要根据当前的对话记录用简洁的一句短语总结用户的当前想要解决的问题或疑议。    
&nbsp;    
需要注意以下要求：    
1.如果用户表达是一句完整的话，不需要联系上文的销售员说的话，如果不是完整的，需要联系上文销售员说的话。    
2.用户问题包括：非本人、询问什么平台、是否会查征信、利息多少，还款方式、提前还款、下款时间，不满投诉、投诉倾向、别给我打电话了，怎么有我的号码，询问工号，静音未回复，怎么注销，已经注销，打电话目的，不愿意参与兜底降息、提额、优惠券等活动，不会操作，通话中没有网络，为什么要操作，咨询京东其他产品，操作麻烦，不愿意加微，有人联系过，咨询兜底降息、提额、优惠券等产品/活动信息，是否有销售任务，挂机后自己操作，多次不需要，多次在忙，表示在开车或开会，多次表示在开车或开会等等；    
3.如果用户当前没有问题，则输出"无"，不要无中生有。    
4.用户表示已经提额成功或借款成功，表示用户无问题。    
5.用户表述额度相关问题，询问额度多少，表示额度低，想提升额度，用户问题分别是额度多少，额度低，如何提升额度。    
6.用户表示不能用，额度不能用，无法提供服务，综合评分不足等，用户问题是无法正常借款。    
7.用户表示没有额度了、额度用完了等，用户问题是额度已用完。    
8.用户询问利息多少，想降低利率，用户问题分别是利息多少，如何降低利率。    
9.用户想要息费减免券、折扣券、优惠券、打折券、降低利率利息、觉得利息高、有利率不满问题等，用户问题是利息高。    
10.用户表示是智能助手，语音助手，电话助理，请录音，录音超时等时，用户其实并非真人而是AI回复，这时用户问题是语音信箱。    
11.用户表示稍后查看，之后操作，之后会考虑，有需要再说等，用户问题是会考虑。用户肯定回应算是用户无问题。    
12.用户表示会使用APP，会操作APP，知道操作流程，用户问题是自己会操作。    
13.用户表示在忙，上班等没有时间进行沟通时，用户问题是在忙。    
14.用户未明白销售员的表述含义，或者希望销售员简明点表述，用户问题是没听懂。    
15.用户想打断销售员、用户要求销售员暂停，用户问题是打断。    
16.用户询问怎么去操作，去哪操作，怎么去查看，去哪查看，用户问题是如何操作。    
17.用户询问是哪个APP，用户问题是申请渠道。    
18.用户表述不需要，不用等拒绝含义，用户问题是不需要。用户可能会用不缺钱、使用其他平台等来表示自己不需要，这时应返回"不需要-不缺钱"、"不需要-使用其他平台"。用户一句话中超过3次表述不需要，不用，用户问题是强烈拒绝。    
19.用户一句话中或上下文中超过3次表述不需要，不用，用户问题是强烈拒绝。    
20.用户一句话中或上下文中表述在忙、会考虑、稍后再说等相同意思的次，如果超过三次，需要加上"多次表示"的前缀，如多次表示在忙、多次表示会考虑、多次表示稍后再说等。    
21.用户询问免息券，优惠券的作用是什么，用户问题是：优惠券作用。免息券，优惠券使用期限，用户问题是：优惠券期限。    
22.用户询问如何去查看免息券，优惠券，用户问题是如何查看优惠券。用户询问如何去使用免息券，优惠券，用户问题是如何使用优惠券。    
23.用户担心借了之后还不起，借了之后造成信用损失，用户问题是还不起。    
24.用户只有明确否定是本人，用户问题才是非本人。在核身阶段单纯的"你说"等，应是无问题，询问"你哪位"等是"询问什么平台"。    
25.用户询问我们是不是机器人，用户问题是询问人还是机器人。    
26.用户在上下文中表述自己正在开车/开会的，则用户问题是表示在开车/开会。  
===    
销售员:喂您好，请问您是李先生吗？    
用户:您好，有什么事情请说    
销售员:您好，我是京东金融的客户经理，工号667716，是这样的，金条为了提高用户使用体验，在微信上给部分用户开通了优化额度利率的专属服务通道，之后有提额降息的活动会第一时间通过微信通知您，那我这边会用官方企业微信添加您，辛苦您那边操作同意一下好吧？    
用户:微信吗？ 没有。    
销售员:您别误会哈，加上之后不会打扰您的，主要是为了方便您后续在使用京东金融时有咨询的问题，我们能及时帮助您解决 欸~您好，您先别挂哈，来电不是让您办业务的，是京东金融安排我成为您日后的专属客户经理，之后任何提额降息、借款免息活动我就能第一时间通知您，或者您使用产品遇到问题也可以直接咨询我，我现在通过官方企业微信添加您，麻烦您通过一下可以吗？    
用户:行了，我晚点，晚点晚点，我现在这里正在忙啊。    
&nbsp;    
===    
结合对话记录和注意要求，用一句短语简洁总结用户当前的问题（如果有多个只返回最需要解决的一个），用户问题及依据是：
    """

    # Strip the triple-space layout markers used at the end of prompt lines.
    content = content.replace("   ", "")
    for i in range(1):
        chat = LLMAgent(content)
        logger.info("start")
        start_time = time.time()
        # result = asyncio.run(chat.achat_auto_llm(type="gpt"))
        # result = asyncio.run(chat.achat_auto_llm(type="llm_test"))
        result = asyncio.run(chat.achat_auto_llm(type="self"))
        logger.info("this spend is :{}".format(time.time() - start_time))
        # result = asyncio.run(chat.async_chat_with_openai())
        # for data in result:
        #     logger.info(data)
        logger.info(result)
