# -*- coding:utf-8 -*-
import time

# @Time    : 2023/5/10 14:30
# @Author  : zengwenjia
# @Email   : zengwenjia@lingxi.ai
# @Software: LLM_internal

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

import requests
import json

from agent.llm_agent_parent import LLMAgentParent
from bot.chatgpt.chat_gpt_bot import ChatGPTBot
import config
from bot.insurance_planner_gpt.context_local_util import local_data
from common.log import logger
import openai
import aiohttp
import asyncio
import itertools
import random
import uuid
from util_tool import utils
import os
import datetime
import threading
import settings

# Environment-dependent endpoints for the LLM workers.
# NOTE(review): both branches set default_llm = "self", so the ONLINE flag
# currently only switches hosts — confirm this is intentional.
if settings.ONLINE:
    llm_host = "localhost"
    self_llm_host = "172.24.0.18"
    default_llm = "self"
else:
    llm_host = "socket.chatlingxi.com"
    self_llm_host = "101.126.8.103"
    default_llm = "self"
config.load_config()
import copy

lock = threading.Lock()
# Acquire (or create) a module-level event loop; below it is used only for
# fire-and-forget loop.run_in_executor(...) training-data writes.
# NOTE(review): confirm this is the same loop the serving framework runs —
# run_in_executor on a loop that is never run would silently do nothing.
try:
    loop = asyncio.get_event_loop()
except RuntimeError:
    asyncio.set_event_loop(asyncio.new_event_loop())
    loop = asyncio.get_event_loop()

# SECURITY(review): every API key below is a hard-coded secret committed to
# source control. They should be rotated and loaded from configuration or a
# secret manager instead of living in this file.

# Active OpenAI-proxy (base_url, api_key) pairs, rotated round-robin via
# data_cycle below.
keys = [
    ("https://api.chatanywhere.com.cn/", "sk-JI5NqdeafrmzcplhXbGjRUzW5oT6i6biELNTU3lUXzDLmiVD"),
    ("https://api.chatanywhere.com.cn/", "sk-BnBC2j8lL9RsUCfYttBmRsIhBJVs0FvBO3IeYsLCKnBryTKN"),
    ("https://api.chatanywhere.com.cn/", "sk-bIT68n0Hyt9WrwxUrHwIvrWfUnUHzblzUIjMwjGB9UEHwZXa"),
    ("https://api.chatanywhere.com.cn/", "sk-tB2vS3SJJekBwc31QwMHbCh49YPWW3UobmbvuRx3wiN0hDvI"),
    ("https://api.chatanywhere.com.cn/", "sk-Jh7m16d7RVUijvKqIFtIQJ0dbVbADa7W1lGb0ba0t0Uy71qK"),
]

# Pool apparently reserved for training-data generation.
# NOTE(review): defined but never referenced in this file.
train_keys = [
    ("https://api.chatanywhere.com.cn/", "sk-wWZiRMreyhtnwGm6tckAsSb3hJR5YYmVyFQTRzKTWNwQZmXi"),
    ("https://api.chatanywhere.com.cn/", "sk-pRq1Rn0OiOo2yM8rK2Dt1FhQl4uoWkzku8dDuJ2Y2FF8mIXo"),
    ("https://api.chatanywhere.com.cn/", "sk-Wf1bsQQ1jYX1koWw7uUCH5HSDWJUi6OR8Xx8YXmWyVCv8sk0"),
    ("https://api.chatanywhere.com.cn/", "sk-BWhs8Hn3bz2C9eqlk2ZjI6oXC4n33whMPWrFXis4suryxV77"),
    ("https://api.chatanywhere.com.cn/", "sk-xuqalfD6UNQbkb0RfVUjNbXbd1OosvOrc7R2uvud42gTPibN"),
    ("https://api.chatanywhere.com.cn/", "sk-mAk7ME8MbmYt4zs76cKGIKjxsgFHEgPyejGJjfDf3hOBoGLP"),
    ("https://api.chatanywhere.com.cn/", "sk-zM6x5IUg0q6u3gtGc4WLxlkiwrKnumqS83JiWVDsXRaW1PLj"),
    ("https://api.chatanywhere.com.cn/", "sk-Vf6Y5ZrW7nDCzEs7bv3a0jJ4RUGE1Z4fly6hJLqPok1PlEBF"),
    ("https://api.chatanywhere.com.cn/", "sk-CfQl1kVdPM13JiX6q6l9XAcM3O6aLhkNz2A0OBq0pusEHNck"),
    ("https://api.chatanywhere.com.cn/", "sk-FdXROLxHzyxMhKXY5QzhLwh4IRlTTEL4x7sPk9gCCmbicOdu"),
    ("https://api.chatanywhere.com.cn/", "sk-SExov0P7oCH9QaWPND3aa7W7hN8M4ZDsmgFkVFTczJDvUZTs"),
    ("https://api.chatanywhere.com.cn/", "sk-znEgLNyNdYShdo81hsXwGljGmD0AONv4iMBHbxZqbBVkwxWV"),
    ("https://api.chatanywhere.com.cn/", "sk-njfT1tzFaEaUrxCoUaYqz66rRSLlJKW5fZU5WM7n2OLtKnke"),
    ("https://api.chatanywhere.com.cn/", "sk-5AwmTKGWaUEARvD3xWzihqDDey3HrTYBFTXMFxRvd26HUlaG"),
    ("https://api.chatanywhere.com.cn/", "sk-tpXAB0WyMjwhxhkpXqPQy38HsGoU13xmQFLT8XFFsPOyzH5A"),
    ("https://api.chatanywhere.com.cn/", "sk-lM4eNRqXXi34cniqBRZ2Kq5Os3ZMPIikFeIyrg1ISmzdF5po"),
    ("https://api.chatanywhere.com.cn/", "sk-kMS9VEWCE5eRvLJpk7QyjYQdxJ7OJnG3opu4VMYgEYBvUTNC"),
    ("https://api.chatanywhere.com.cn/", "sk-e4fhhETc254w2MOVmjLJYR8cvevYpsvupaKL2JJQE7msCmqI"),
    ("https://api.chatanywhere.com.cn/", "sk-hCgQFaAB1BHA3NX4ocybDoNWbLsUr9RlCvfyP715s0fIi56s"),
    ("https://api.chatanywhere.com.cn/", "sk-7GHAuvERby2d1HbcObyPriyQNlpK20LecH17wfVezux2G9cs"),
]

# Keys believed to be expired; kept for reference, never used below.
expire_keys = [
    ("https://api.chatanywhere.cn", "sk-ELfDqXCb4t9w9A2wNv7kAxyrPWPAasch3Zq6MLCusvb7hCdN"),
    ("https://api.chatanywhere.cn", "sk-qFvCZmc454XFY6zREYlAI91XFOCxjs4BnOuriIw1yLps8TkV"),
    # ("https://key.ikungpt.com/", "sk-DTMz1NGXgIMBXUmf06EaC206A5Eb4c939c607e73Ac45EeF0"),
    ("https://api.chatanywhere.cn", "sk-RuYKVoD9tbTjrwnCo2z1zBjrPEeKp8IIO7Zp3XvD48IgjPBR"),

    # ("https://api.xiaojuan.tech", "sk-NpvHbplgoXOg5E7ySJ0Xhx4osqEK36Bi7nJDjgjp4CMxN4dV"),
    # ("https://api.xiaojuan.tech", "sk-gS3VhVL2C8y87Rw57QrJMnhBZv6Nj6eczu9BbPohF7dr4Rit"),
    # ("https://api.chatanywhere.cn", "sk-teqkUzWiRKCGgEzTwNJdpRq8OAg2zqYmwc55hYgsq7hHW0Lh"),

]

# Azure OpenAI deployments: (endpoint, deployment name, api-key).
azure_keys = [
    ("https://lx-gpt-australia-east.openai.azure.com/", "lx-gpt-4-ae", "f31f66c8601d488eac461ade81c98da1"),
    ("https://lx-gpt-canada-east.openai.azure.com/", "lx-gpt-4-ce", "6349021ec7fa47a7b526e92bf638ccf3"),
    ("https://lx-gpt-east-us2.openai.azure.com/", "lx-gpt-4-eu2", "aefe46a980ba4a5bb24845f5618b05ce"),
    ("https://lx-gpt-france-central.openai.azure.com/", "lx-gpt-4-fc", "21b9bf1adb5b48c2a6d177a02e0c0816"),
    ("https://lx-gpt-norway-east.openai.azure.com/", "lx-gpt-4-ne", "ebbb7c0d824f437394f3ccdf56bdaf64"),
    ("https://lx-gpt-south-india.openai.azure.com/", "lx-gpt-4-sl", "42a1c23922394c559a335e9786e5b358"),
    ("https://lx-gpt-sweden-central.openai.azure.com/", "lx-gpt-4-sc", "7b175824ea6c42e8a07c8456704bb791"),
    ("https://lx-gpt-uk-south.openai.azure.com/", "lx-gpt-4-us", "b896d019f5964300a93b821351ac9017"),
    ("https://lx-gpt-west-u.openai.azure.com/", "lx-gpt-4-wu", "340e512ddc7848bf814570dccd8ec149"),
]

# Fallback key pool (single direct key).
backup_keys = [("https://api.chatlingxi.com", "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")]
# Shuffle so each process starts its rotation at a random offset.
random.shuffle(keys)
random.shuffle(azure_keys)

# Infinite round-robin iterators over the pools; next(...) picks the key for
# each request.
data_cycle = itertools.cycle(keys)
backup_data_cycle = itertools.cycle(backup_keys)
azure_data_cycle = itertools.cycle(azure_keys)

# Switch the module-wide default LLM backend at runtime.
def change_default_llm(llm_model):
    """Set the global ``default_llm`` consulted by achat_auto_llm("auto")."""
    global default_llm
    default_llm = llm_model


def get_access_token():
    """Fetch a Baidu AIP OAuth ``access_token`` for the ERNIE chat API.

    Returns the token string, or None when the response has no
    "access_token" field.

    SECURITY(review): client_id/client_secret are hard-coded in the URL —
    they should be rotated and moved to configuration.
    """
    token_url = (
        "https://aip.baidubce.com/oauth/2.0/token"
        "?grant_type=client_credentials"
        "&client_id=X8TlvTouC1rUTiMNxaaOlgoG"
        "&client_secret=7fA2Mzo1Lc5orBoK2oKglkmRu9RSAhe5"
    )
    resp = requests.post(
        token_url,
        headers={"Content-Type": "application/json", "Accept": "application/json"},
        # The endpoint expects a JSON body; an encoded empty string matches
        # the original request exactly.
        data=json.dumps(""),
    )
    return resp.json().get("access_token")


ernie_token = get_access_token()


class LLMAgent(LLMAgentParent):
    def __init__(self, prompt, gpt_prompt_content=None):
        """Build both prompt formats from *prompt*.

        gpt_prompt_content, when provided, replaces *prompt* as the system
        message for the chat-format (OpenAI/Azure) backends; the raw
        ``model_prompt`` for self-hosted workers always uses *prompt*.
        """
        super().__init__(prompt, gpt_prompt_content)
        self.gpt_prompt_content = prompt if gpt_prompt_content is None else gpt_prompt_content
        self.gpt_prompt = [{"role": "system", "content": self.gpt_prompt_content}]
        self.model_prompt = (
            "你是一个专业的助手，你需要基于用户的问题，给出准确的答案。USER: "
            + self.prompt
            + " ASSISTANT:"
        )

    async def async_chat_with_openai(self, retry_count=0):
        """Call gpt-4 (non-streaming) through the openai SDK.

        Returns the assistant message content, or "" on any API failure.
        ``retry_count`` is kept for interface compatibility; it is unused
        (the retry path it belonged to is commented out).
        """
        logger.info("gpt4_openai，the gpt prompt is: {}".format(self.gpt_prompt))
        try:
            response = await openai.ChatCompletion.acreate(
                model="gpt-4",  # name of the chat model
                messages=self.gpt_prompt,
                top_p=1,
                temperature=0.9,
                max_tokens=1024,
                request_timeout=100,
            )
            result = response["choices"][0]['message']['content']
        except Exception as ee:
            # The previous nested try/except here was dead code: its body
            # (a log call and an assignment) could not raise, so the inner
            # handler that re-raised TimeoutError never fired. Behavior is
            # preserved: log the failure and fall back to an empty result.
            logger.error("gpt4 error: {}".format(ee))
            # if retry_count < 1:
            #     return await self.achat_with_azure(retry_count + 1)
            result = ""

        return result

    async def chat_with_openai_stream(self, retry_count=0):
        """Stream gpt-4 chunks through the openai SDK.

        Yields one parsed chunk dict per SSE event. On any error the stream
        simply ends after logging, so callers may observe a short or empty
        stream. ``retry_count`` is accepted for signature parity but unused.
        """
        logger.info("gpt4_openai, the gpt prompt is: {}".format(self.gpt_prompt))
        try:
            response = await openai.ChatCompletion.acreate(
                model="gpt-4",  # name of the chat model
                messages=self.gpt_prompt,
                top_p=1,
                temperature=0.9,
                max_tokens=1024,
                stream=True,
                request_timeout=100,
            )

            async for chunk in response:
                if chunk:
                    # NOTE(review): relies on str(chunk) being valid JSON —
                    # confirm against the installed openai SDK version.
                    data = json.loads(str(chunk))
                    yield data

        except Exception as ee:
            logger.error("gpt4 stream error: {}".format(ee))

    async def achat_with_proxy(self, retry_count=0):
        """Call gpt-3.5-turbo through a rotating proxy key.

        Any failure is retried once with the next key from the rotation;
        if that also fails, "" is returned.
        """
        logger.info("the gpt prompt is: {}".format(self.gpt_prompt))
        result = ""
        base_url, api_key = next(data_cycle)
        try:
            request_headers = {
                "Content-Type": "application/json",
                "Authorization": "Bearer " + api_key,
            }
            body = {
                "model": "gpt-3.5-turbo",
                "messages": self.gpt_prompt,
                "temperature": 0.9,
                # "stream": True,
            }
            async with aiohttp.ClientSession() as session:
                resp = await session.post(
                    base_url + "/v1/chat/completions",
                    headers=request_headers,
                    json=body,
                    timeout=aiohttp.ClientTimeout(total=100),
                )
                raw = await resp.content.read()
            result = json.loads(raw.decode())["choices"][0]['message']['content']
        except Exception as ee:
            logger.error("gpt proxy error: {}".format(ee))
            if retry_count < 1:
                return await self.achat_with_proxy(retry_count + 1)

        return result

    async def achat_with_proxy_gpt4(self, retry_count=0, save_data=True, save_path="llm"):
        """Call gpt-4 via a rotating proxy key (non-streaming).

        The single retry switches to the pinned "gpt-4-0314" snapshot.
        Returns the assistant message content, or "" if both attempts fail.
        """
        # logger.info("the gpt prompt is: {}".format(self.gpt_prompt))
        # Session/message ids come from request-scoped thread-local storage;
        # missing context (e.g. offline runs) degrades to empty ids.
        try:
            session_id = local_data.session_id
            message_id = local_data.message_id
        except Exception:
            session_id = ""
            message_id = ""
        logger.info("the gpt prompt is: {}".format(self.gpt_prompt[0].get('content')))
        result = ""
        key_tuple = next(data_cycle)
        key = key_tuple[1]
        content = ""
        model_type = "gpt-4"
        if retry_count > 0:
            model_type = "gpt-4-0314"  # retry uses the pinned model snapshot
        try:

            headers = {
                "Content-Type": "application/json",
                "Authorization": "Bearer " + key
            }
            payload = {
                "model": model_type,
                "messages": self.gpt_prompt,
                "temperature": 0.9,
                "max_tokens": 1024,
                # "request_timeout": 30
                # "stream": True,
            }
            async with aiohttp.ClientSession() as session:
                response = await session.post(key_tuple[0] + "/v1/chat/completions",
                                              headers=headers, json=payload,
                                              timeout=aiohttp.ClientTimeout(total=100))
                content = await response.content.read()
            result = json.loads(content.decode())["choices"][0]['message']['content']

            if save_data:
                # Fire-and-forget: the prompt/answer pair is persisted in a
                # thread-pool executor; the future is not awaited or checked.
                loop.run_in_executor(None, self.save_train_data, self.prompt, result,
                                     self.__class__.__name__, save_path, "gpt-4",
                                     session_id + ":" + message_id)


        except Exception as ee:
            logger.error("the key is: {}".format(key))
            if content:
                logger.error("receive message is:" + content.decode())
            logger.exception("gpt proxy error: {}".format(ee))
            if retry_count < 1:
                return await self.achat_with_proxy_gpt4(retry_count + 1)

        return result

    async def achat_with_proxy_ernie(self, save_data=True, save_path="ernie"):
        """Call Baidu ERNIE (wenxinworkshop) with ``self.prompt``.

        Returns the "result" field of the response, or "" on any failure.
        """
        # Consistency fix: fetch the session/message ids the same way the
        # gpt-4/azure backends do, so save_train_data receives the same
        # 6-argument form they use (it was previously called here with only
        # (prompt, result, save_path)).
        try:
            session_id = local_data.session_id
            message_id = local_data.message_id
        except Exception:
            session_id = ""
            message_id = ""
        logger.info("the ernie prompt is: {}".format(self.prompt))
        result = ""
        try:
            url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions?access_token=" + ernie_token
            headers = {
                'Content-Type': 'application/json'
            }

            payload = json.dumps({
                "messages": [
                    {
                        "role": "user",
                        "content": self.prompt
                    }
                ],
                "stream": False,
            })

            async with aiohttp.ClientSession() as session:
                async with session.post(url, headers=headers, data=payload) as response:
                    response_data = await response.text()

                    result = json.loads(response_data)["result"]

            if save_data:
                # Fire-and-forget persistence of the prompt/answer pair,
                # mirroring the other achat_* backends.
                loop.run_in_executor(None, self.save_train_data, self.prompt, result,
                                     self.__class__.__name__, save_path, "ernie",
                                     session_id + ":" + message_id)

        except Exception as ee:
            logger.exception("ernie proxy error: {}".format(ee))
        return result

    def parse_gpt_stream_response(self, response):
        """Decode one SSE line (bytes) from an OpenAI-style stream.

        Returns a dict for an ordinary chunk, a generator over the items when
        the payload is a JSON list, or None for the terminal "data: [DONE]"
        marker and blank keep-alive lines. Raises Exception for any other
        JSON payload type.
        """
        if response.strip() == b"data: [DONE]":
            return None
        if not response or not response.strip():
            return None
        # Drop the SSE "data: " prefix when present, then parse the JSON body.
        payload = response[len(b"data: "):] if response.startswith(b"data: ") else response
        decoded = payload.decode("utf-8")
        parsed = json.loads(decoded)
        if isinstance(parsed, list):
            return (item for item in parsed)
        if isinstance(parsed, dict):
            return parsed
        raise Exception("gpt stream error: {}".format(decoded))

    async def achat_with_proxy_stream_gpt4(self):
        """Stream gpt-4 chunks through a rotating proxy key.

        Yields parsed SSE payloads (see parse_gpt_stream_response). On any
        failure the whole request is retried once with the next key and the
        pinned "gpt-4-0314" model; a second failure ends the stream silently.
        NOTE(review): chunks already yielded before a mid-stream failure are
        followed by the retry's chunks from the beginning — the consumer may
        see duplicated prefix text.
        """
        logger.info("the gpt prompt is: {}".format(self.gpt_prompt))
        key_tuple = next(data_cycle)
        key = key_tuple[1]
        headers = {
            "Content-Type": "application/json",
            "Authorization": "Bearer " + key
        }
        payload = {
            "model": "gpt-4",
            "messages": self.gpt_prompt,
            "stream": True,
            "temperature": 0.9,
            "max_tokens": 2048,
        }
        try:
            content = ""

            async with aiohttp.ClientSession() as session:
                async with session.post(key_tuple[0] + "/v1/chat/completions",
                                        headers=headers, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=100)) as response:
                    async for line in response.content:
                        result = self.parse_gpt_stream_response(line)
                        if result:
                            yield result



        except Exception as ee:
            # log the failing key
            logger.error("the key is: {}".format(key))
            logger.exception("gpt4 stream error: {}".format(ee))
            try:
                # Retry once: next key from the rotation, older model snapshot.
                key_tuple = next(data_cycle)
                key = key_tuple[1]
                headers = {
                    "Content-Type": "application/json",
                    "Authorization": "Bearer " + key
                }
                payload = {
                    "model": "gpt-4-0314",
                    "messages": self.gpt_prompt,
                    "stream": True,
                    "temperature": 0.9,
                    "max_tokens": 2048,
                }
                async with aiohttp.ClientSession() as session:
                    async with session.post(key_tuple[0] + "/v1/chat/completions",
                                            headers=headers, json=payload,
                                            timeout=aiohttp.ClientTimeout(total=100)) as response:
                        async for line in response.content:
                            result = self.parse_gpt_stream_response(line)
                            if result:
                                yield result
            except Exception as ee:
                logger.error("the key is: {}".format(key))
                logger.exception("gpt4-0314 stream again error: {}".format(ee))

    def chat_with_azure(self, retry_count=0):
        """Synchronous gpt-3.5 call against a fixed Azure OpenAI deployment.

        Retries once on failure; returns "" when the retry also fails.
        SECURITY(review): api_key is hard-coded — move to configuration.
        NOTE(review): the inner except clauses can only trigger if the
        recursive retry call itself raises; they never see the original
        exception ``ee``, so the InvalidRequestError/Timeout translation is
        effectively unreachable for first-attempt failures.
        """
        logger.info("gpt3_5_azure，the gpt prompt is: {}".format(self.gpt_prompt))
        try:
            response = openai.ChatCompletion.create(
                api_type="azure",
                api_version="2023-03-15-preview",
                api_base="https://lingxi-openai.openai.azure.com",
                api_key="45a5ee249f364e208dd950f87ab5aba7",
                engine="gpt-35",
                messages=self.gpt_prompt,
                temperature=0.7,
                max_tokens=1024,
                request_timeout=15,
            )
            result = response["choices"][0]['message']['content']
        except Exception as ee:
            try:
                logger.error("gpt3.5 azure error: {}".format(ee))
                if retry_count < 1:
                    return self.chat_with_azure(retry_count + 1)
                result = ""
            except openai.error.InvalidRequestError:
                raise openai.error.InvalidRequestError("token limit", None)
            except openai.error.Timeout:
                raise TimeoutError
            except Exception:
                logger.error("gpt3.5 stream retry error: {}".format(ee))
                return ""

        return result

    async def achat_self_llm(self, model_name="planner_llama-2_13b_new", save_data=True, save_path="llm"):
        """Drain the self-hosted LLM stream and return the final chunk's text.

        Returns "" when the stream yields nothing. (Previously the loop
        variable was read after the loop, raising NameError on an empty
        stream.)
        """
        # Session/message ids from request-scoped thread-local storage;
        # missing context degrades to empty ids.
        try:
            session_id = local_data.session_id
            message_id = local_data.message_id
        except Exception:
            session_id = ""
            message_id = ""
        last_chunk = None
        async for chunk in self.achat_self_llm_stream(model_name):
            # logger.info("the llm response is :{}".format(chunk["text"]))
            last_chunk = chunk

        # Guard against an empty stream instead of crashing with NameError.
        if last_chunk is None:
            logger.error("self llm stream produced no output")
            return ""

        if save_data:
            # Fire-and-forget persistence, mirroring the gpt-4/azure backends.
            loop.run_in_executor(None, self.save_train_data, self.prompt, last_chunk["text"],
                                 self.__class__.__name__, save_path, "self",
                                 session_id + ":" + message_id)

        return last_chunk["text"]

    async def achat_test_llm_stream(self, model_name="planner_llama-2_13b_new"):
        # Placeholder — not implemented. NOTE(review): declared as a plain
        # coroutine (no yield), so unlike achat_self_llm_stream it is not an
        # async generator and cannot be consumed with "async for".
        pass

    async def achat_self_llm_stream(self, model_name="planner_llama-2_13b_new"):
        """Stream generation chunks from the self-hosted FastChat worker.

        Tries the self_llm_host worker first; on any error retries once
        against llm_host. Yields decoded JSON dicts, splitting the byte
        stream on the NUL (b'\\0') record delimiter.
        """
        logger.info("the llm prompt is :{}".format(self.prompt))
        # NOTE(review): this local shadows the module-level default_llm, so
        # the fallback request below always uses "planner_llama-2_13b_new"
        # regardless of change_default_llm() — confirm this is intended.
        default_llm = "planner_llama-2_13b_new"
        payload = {
            "model": model_name,
            "prompt": self.model_prompt,
            "temperature": 0.7,
            "max_new_tokens": 1024,
            'repetition_penalty': 0.5,
            # 'top_p': 0.9,
            # 'top_k': 1,
            "stop": '</s>',
            "stop_token_ids": None,
            "echo": False,
        }
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + self_llm_host + ":21001" + "/worker_generate_stream",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=100)) as response:

                    # An empty delimiter is not supported by aiohttp, so we'll read chunk by chunk
                    buffer = b''
                    async for line in response.content.iter_any():
                        buffer += line
                        while b'\0' in buffer:
                            chunk, buffer = buffer.split(b'\0', 1)
                            if chunk:
                                data = json.loads(chunk.decode())
                                yield data
        except Exception as ee:
            logger.error("llm stream error: {}".format(ee))
            # Fallback: same request shape against llm_host with the local
            # default model and a higher temperature.
            payload = {
                "model": default_llm,
                "prompt": self.model_prompt,
                "temperature": 0.9,
                "max_new_tokens": 1024,
                'repetition_penalty': 0.5,
                # 'top_p': 0.9,
                # 'top_k': 1,
                "stop": '</s>',
                "stop_token_ids": None,
                "echo": False,
            }
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + llm_host + ":21001" + "/worker_generate_stream",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=100)) as response:

                    # An empty delimiter is not supported by aiohttp, so we'll read chunk by chunk
                    buffer = b''
                    async for line in response.content.iter_any():
                        buffer += line
                        while b'\0' in buffer:
                            chunk, buffer = buffer.split(b'\0', 1)
                            if chunk:
                                data = json.loads(chunk.decode())
                                yield data

    async def achat_qwen_llm(self, model_name="Qwen-7B-Chat"):
        """Drain the Qwen stream and return the final chunk's "text" field.

        Returns "" when the stream yields nothing. (Previously the loop
        variable was read after the loop, raising NameError on an empty
        stream.)
        """
        last_chunk = None
        async for chunk in self.achat_qwen_llm_stream(model_name):
            # logger.info("the llm response is :{}".format(chunk["text"]))
            last_chunk = chunk

        # Guard against an empty stream instead of crashing with NameError.
        if last_chunk is None:
            logger.error("qwen llm stream produced no output")
            return ""
        return last_chunk["text"]

    async def achat_qwen_llm_stream(self, model_name="Qwen-7B-Chat"):
        """Stream generation chunks for Qwen from the FastChat worker.

        Yields decoded JSON dicts split on the NUL record delimiter.
        NOTE(review): the except-branch fallback sends the identical payload
        to the identical host — it is effectively a one-shot retry, not a
        different endpoint. The local ``default_llm`` below is assigned but
        never used (the fallback uses ``model_name``).
        """
        logger.info("the qwen llm prompt is :{}".format(self.prompt))
        default_llm = "Qwen-7B-Chat"
        payload = {
            "model": model_name,
            "prompt": self.model_prompt,
            "temperature": 0.9,
            "max_new_tokens": 1024,
            'repetition_penalty': 0.5,
            # 'top_p': 0.9,
            # 'top_k': 1,
            "sep": "<|im_end|>",
            "stop_token_ids": [
                151643,
                151644,
                151645,
            ],  # "<|endoftext|>", "<|im_start|>", "<|im_end|>"
            "stop_str": "<|endoftext|>",
            "echo": False,
        }
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + llm_host + ":21001" + "/worker_generate_stream",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=100)) as response:

                    # An empty delimiter is not supported by aiohttp, so we'll read chunk by chunk
                    buffer = b''
                    async for line in response.content.iter_any():
                        buffer += line
                        while b'\0' in buffer:
                            chunk, buffer = buffer.split(b'\0', 1)
                            if chunk:
                                data = json.loads(chunk.decode())
                                yield data
        except Exception as ee:
            logger.error("llm stream error: {}".format(ee))
            payload = {
                "model": model_name,
                "prompt": self.model_prompt,
                "temperature": 0.9,
                "max_new_tokens": 1024,
                'repetition_penalty': 0.5,
                # 'top_p': 0.9,
                # 'top_k': 1,
                "sep": "<|im_end|>",
                "stop_token_ids": [
                    151643,
                    151644,
                    151645,
                ],  # "<|endoftext|>", "<|im_start|>", "<|im_end|>"
                "stop_str": "<|endoftext|>",
                "echo": False,
            }
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + llm_host + ":21001" + "/worker_generate_stream",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=100)) as response:

                    # An empty delimiter is not supported by aiohttp, so we'll read chunk by chunk
                    buffer = b''
                    async for line in response.content.iter_any():
                        buffer += line
                        while b'\0' in buffer:
                            chunk, buffer = buffer.split(b'\0', 1)
                            if chunk:
                                data = json.loads(chunk.decode())
                                yield data

    async def achat_with_azure_gpt4(self, retry_count=0, save_data=True, save_path="llm"):
        """Call gpt-4 via a rotating Azure OpenAI deployment (non-streaming).

        Retries once with the next deployment on failure; returns the
        assistant message content, or "" if both attempts fail.
        """
        # logger.info("the gpt prompt is: {}".format(self.gpt_prompt))
        # Session/message ids from request-scoped thread-local storage;
        # missing context degrades to empty ids.
        try:
            session_id = local_data.session_id
            message_id = local_data.message_id
        except Exception:
            session_id = ""
            message_id = ""
        logger.info("the gpt prompt is: {}".format(self.gpt_prompt[0].get('content')))
        result = ""
        key_tuple = next(azure_data_cycle)
        url = key_tuple[0]
        model = key_tuple[1]  # Azure deployment name, not an OpenAI model id
        key = key_tuple[2]
        content = ""
        try:
            headers = {
                "Content-Type": "application/json",
                "api-key": key
            }
            payload = {
                "messages": self.gpt_prompt,
                "temperature": 0.9,
                "max_tokens": 1024,
                # "request_timeout": 30
                # "stream": True,
            }
            async with aiohttp.ClientSession() as session:
                response = await session.post(url + "openai/deployments/" + model + "/chat/completions?api-version=2023-05-15",
                                              headers=headers, json=payload,
                                              timeout=aiohttp.ClientTimeout(total=100))
                content = await response.content.read()
            result = json.loads(content.decode())["choices"][0]['message']['content']

            if save_data:
                # Fire-and-forget persistence of the prompt/answer pair.
                loop.run_in_executor(None, self.save_train_data, self.prompt, result,
                                     self.__class__.__name__, save_path, "azure-gpt-4",
                                     session_id + ":" + message_id)

        except Exception as ee:
            logger.error("the azure key is: {}".format(key))
            if content:
                logger.error("receive message is:" + content.decode())
            logger.exception("azure gpt error: {}".format(ee))
            if retry_count < 1:
                return await self.achat_with_azure_gpt4(retry_count + 1)

        return result

    async def achat_with_azure_stream_gpt4(self):
        """Stream gpt-4 chunks from a rotating Azure OpenAI deployment.

        Yields parsed SSE payloads (see parse_gpt_stream_response). On any
        failure the whole request is retried once against the next
        deployment; a second failure ends the stream silently.
        NOTE(review): chunks yielded before a mid-stream failure are followed
        by the retry's chunks from the beginning.
        """
        logger.info("the gpt prompt is: {}".format(self.gpt_prompt))
        key_tuple = next(azure_data_cycle)
        url = key_tuple[0]
        model = key_tuple[1]  # Azure deployment name
        key = key_tuple[2]
        headers = {
            "Content-Type": "application/json",
            "api-key": key
        }
        payload = {
            "messages": self.gpt_prompt,
            "stream": True,
            "temperature": 0.9,
            "max_tokens": 2048,
        }
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post(url + "openai/deployments/" + model + "/chat/completions?api-version=2023-05-15",
                                        headers=headers, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=100)) as response:
                    async for line in response.content:
                        result = self.parse_gpt_stream_response(line)
                        if result:
                            yield result
        except Exception as ee:
            # log the failing key
            logger.error("the azure key is: {}".format(key))
            logger.exception("azure gpt4 stream error: {}".format(ee))
            try:
                # Retry once against the next deployment in the rotation.
                key_tuple = next(azure_data_cycle)
                url = key_tuple[0]
                model = key_tuple[1]
                key = key_tuple[2]
                headers = {
                    "Content-Type": "application/json",
                    "api-key": key
                }
                payload = {
                    "messages": self.gpt_prompt,
                    "stream": True,
                    "temperature": 0.9,
                    "max_tokens": 2048,
                }
                async with aiohttp.ClientSession() as session:
                    async with session.post(url + "openai/deployments/" + model + "/chat/completions?api-version=2023-05-15",
                                            headers=headers, json=payload,
                                            timeout=aiohttp.ClientTimeout(total=100)) as response:
                        async for line in response.content:
                            result = self.parse_gpt_stream_response(line)
                            if result:
                                yield result
            except Exception as ee:
                logger.error("the azure key is: {}".format(key))
                logger.exception("azure gpt4 stream again error: {}".format(ee))


    async def achat_auto_llm(self, is_stream=False, type="auto"):
        """Dispatch to an LLM backend selected by *type*.

        "auto" resolves to the module-level default_llm. Known backends are
        "self", "qwen", "gpt", "azure" and "ernie" (ernie has no streaming
        variant); any other value is treated as a self-hosted model name.
        Streaming calls return an async generator; non-streaming calls await
        and return the final text.
        """
        backend = default_llm if type == "auto" else type
        if backend == "self":
            return self.achat_self_llm_stream() if is_stream else await self.achat_self_llm()
        if backend == "qwen":
            return self.achat_qwen_llm_stream() if is_stream else await self.achat_qwen_llm()
        if backend == "gpt":
            return self.achat_with_proxy_stream_gpt4() if is_stream else await self.achat_with_proxy_gpt4()
        if backend == "azure":
            return self.achat_with_azure_stream_gpt4() if is_stream else await self.achat_with_azure_gpt4()
        if backend == "ernie" and not is_stream:
            return await self.achat_with_proxy_ernie()
        # Unknown backend: route to the self-hosted worker with *backend* as
        # the model name (matches the original fall-through behavior).
        if is_stream:
            return self.achat_self_llm_stream(backend)
        return await self.achat_self_llm(backend)


if __name__ == "__main__":
    import asyncio

    # Manual smoke test: push one insurance-sales dialogue prompt through
    # achat_auto_llm against a specific self-hosted model.
    content = """
    作为一个专业的泰康的客户经理,场景是在电话通话中，你的主要任务是根据以下销售流程
收集用户信息（年龄、姓氏、用户是否同意参观、为何人考虑养老社区、需要什么房型），
销售流程依次是:开场、介绍泰康养老社区并收集用户的年龄、收集用户的姓氏、询问是否
同意参观、收集用户是为何人考虑养老社区、向用户介绍并确认用户需要什么样的房型、询
问用户对年金险是否有兴趣、向用户说结束语。

注意以下事项：
1.你通话的目的是按照流程，按照顺序依次收集用户的信息。如果当前流程向用户收集的信
息未收集到，可以在不引起用户反感的情况下通过下一轮对话来尽可能地收集。
2.你需要分析用户是否真正回答了你的问题，如果用户没有正面回答，需要停在当前流程继
续向用户确认。如：询问用户姓氏，用户回复"贵姓啊贵"，则用户没有证明回答。
3.如果用户有连续两轮拒绝、强烈拒绝（连续三次拒绝可认为是强烈拒绝，如："不需要不
需要不需要"）、抱怨、不高兴、激动、不耐烦、投诉、骂人、说脏话、报警等负面情绪时
，直接进行流程:"向用户说结束语"。
4.以下三种情况命中一项则默认用户是为自己考虑养老社区的，跳过"收集用户是为何人考
虑养老社区"这一步流程：（1）用户的父母不在了。（2）用户的父母去世了。（3）用户的年龄大于65岁。
5.如果用户回复内容为"@@quiet@@"，代表用户静音没有说话。如果连续三轮交互用户都为
静音，则直接进行直接进行流程:"向用户说结束语"。
6.为了防止用户的反感，同一流程的问题最多连续问用户两次，问两次后进行下一流程。
7.输出结论即可，不需要给出理由。

用户信息如下:
 {"城市":"西安","年龄":"47岁","姓氏":"万","用户是否同意参观":"是","为何人考虑":"父母"，所需房型:"双人间"}

对话记录如下:
===
泰康的客户经理:喂，您好，您好，我这边是泰康高品质养老社区体验官，给您来电是想邀
请您和家人来参观咱们泰康的高品质养老社区，提前为养老做规划！耽误您2分钟简单给您
做个介绍好吧？
用户:好啊
泰康的客户经理: 泰康养老社区是全国第一的养老品牌，现在有免费参观的活动，我给您发个邀请函，我听您声音还年轻的，冒昧问下您今年多大了啊？
用户:四十七岁
泰康的客户经理: 那回头您可以来参观下咱们园区的居住环境，活动设施，医疗条件，不管是给您自己未来规划，还是给父母家人了解都是非常合适的，我姓刘，您叫我小刘就可以了，冒昧问下您贵姓啊？
用户:姓万
泰康的客户经理: 万老师，感谢您接受我们的邀请。那我给您发一下园区的配置介绍，未来有时间可以来参观了解。那您现在对养老社区的了解是为自己做打算，还是为父母做打算呢？
用户:给我父母
泰康的客户经理: 听的出来您非常有孝心哈，我们很多客户都是给父母了解入住的，您可以详细了解下。那您或者您的父母主要需求的是哪种房型呢？单人间，双人间，还是套间呢？
用户:双人间
泰康的客户经理: 好的，万老师，您希望为您的父母选择这个舒适的双人间。那我这边会为您详细了解一下您的需求，以便我们的养老顾问在为您安排参观活动的时候，能够提供更符合您需求的服务。请问您或者您的父母对这个养老社区有什么特别关注的方面吗？比如社区的环境、设施、或者是社区的一些服务内容等等。
用户:你介绍下
泰康的客户经理: 咱们养老社区能让您有个健康富足长寿的晚年生活，官方数据统计入住高端养老社区的老人要长寿8-10年。目前咱们养老社区您是给自己了解还是给父母了解呢？
用户:能长寿吗
泰康的客户经理: 咱们养老社区能让您有个健康富足长寿的晚年生活，官方数据统计入住高端养老社区的老人要长寿8-10年。您或者您的父母对这个养老社区有什么特别关注的方面吗？比如社区的环境、设施、或者是社区的一些服务内容等等。
用户:还长寿十年

===
基于销售流程和对话记录，你推理出来接下来沟通的销售流程名是：
    """
    # Strip runs of three spaces introduced by the indented triple-quoted
    # literal before sending the prompt.
    content = content.replace("   ", "")
    for i in range(1):
        chat = LLMAgent(content)
        logger.info("start")
        # The type string is not a known backend keyword, so achat_auto_llm
        # routes it to achat_self_llm with this value as the model name.
        result = asyncio.run(chat.achat_auto_llm(type="Colossal-LLaMA-2-7b_20231204b"))
        # result = asyncio.run(chat.async_chat_with_openai())
        # for data in result:
        #     logger.info(data)
        logger.info(result)
