# -*- coding:utf-8 -*-
import time

# @Time    : 2023/5/10 14:30
# @Author  : zengwenjia
# @Email   : zengwenjia@lingxi.ai
# @Software: LLM_internal

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

import requests
import json

from agent.llm_agent_parent import LLMAgentParent
import config
from bot.insurance_planner_gpt.context_local_util import local_data
from common.log import logger
import openai
from openai import AsyncOpenAI
import aiohttp
import asyncio
import itertools
import random
import threading
import settings

# Pick the FastChat worker host per deployment environment; "self" means
# "use the self-hosted model" in achat_auto_llm's dispatch below.
if settings.ONLINE:
    llm_host = "localhost"
    default_llm = "self"
else:
    llm_host = "socket.chatlingxi.com"
    default_llm = "self"
config.load_config()
import copy

# Lock reserved for cross-thread coordination around the shared loop below.
lock = threading.Lock()
# Obtain (or lazily create) an event loop at import time; it is reused by the
# run_in_executor() calls that persist training data.
# NOTE(review): asyncio.get_event_loop() outside a running loop is deprecated
# in modern Python — confirm this still behaves as intended on upgrade.
try:
    loop = asyncio.get_event_loop()
except RuntimeError:
    asyncio.set_event_loop(asyncio.new_event_loop())
    loop = asyncio.get_event_loop()

# NOTE(review): every API key below is hard-coded in source. Keys committed to
# a repository must be treated as leaked and rotated; move them to environment
# variables or a secrets store.
client = AsyncOpenAI(
    # defaults to os.environ.get("OPENAI_API_KEY")
    api_key="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",
)

# Rotating pool of (base_url, api_key) pairs for the OpenAI-compatible proxy.
keys = [
    ("https://api.chatanywhere.com.cn/", "sk-cbxdqhRyCr6FUw1u8HZVQ1Dl5YMSXHsGLcJS8W1JzpcQGbHo"),
    ("https://api.chatanywhere.com.cn/", "sk-9B1kVYyf2tDLKxu0WFrM6n5z8Xm6zb8tyeKsz3Te4KswV7P7"),
    ("https://api.chatanywhere.com.cn/", "sk-nBRh1AqksxmVY7pMIHGZ6W5XGvkJk6KhXXuExXk3ZkQrin1z"),
    ("https://api.chatanywhere.com.cn/", "sk-XxsgcPX3ALAmpylCZWvalaDkYOWXNQ1uenrbDfwlgudNlxip"),
    ("https://api.chatanywhere.com.cn/", "sk-rgJjmdcnuSBaXMvegtNQI1MbU9Isme4LvWRl0PGiFDEl8sD4"),
]


# Azure OpenAI deployments as (endpoint, deployment_name, api_key) triples,
# spread across regions to distribute quota.
azure_keys = [
    ("https://lx-gpt-australia-east.openai.azure.com/", "lx-gpt-4-ae", "f31f66c8601d488eac461ade81c98da1"),
    ("https://lx-gpt-canada-east.openai.azure.com/", "lx-gpt-4-ce", "6349021ec7fa47a7b526e92bf638ccf3"),
    ("https://lx-gpt-east-us2.openai.azure.com/", "lx-gpt-4-eu2", "aefe46a980ba4a5bb24845f5618b05ce"),
    ("https://lx-gpt-france-central.openai.azure.com/", "lx-gpt-4-fc", "21b9bf1adb5b48c2a6d177a02e0c0816"),
    ("https://lx-gpt-norway-east.openai.azure.com/", "lx-gpt-4-ne", "ebbb7c0d824f437394f3ccdf56bdaf64"),
    ("https://lx-gpt-south-india.openai.azure.com/", "lx-gpt-4-sl", "42a1c23922394c559a335e9786e5b358"),
    ("https://lx-gpt-sweden-central.openai.azure.com/", "lx-gpt-4-sc", "7b175824ea6c42e8a07c8456704bb791"),
    ("https://lx-gpt-uk-south.openai.azure.com/", "lx-gpt-4-us", "b896d019f5964300a93b821351ac9017"),
    ("https://lx-gpt-west-u.openai.azure.com/", "lx-gpt-4-wu", "340e512ddc7848bf814570dccd8ec149"),
]

backup_keys = [("https://api.chatlingxi.com", "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")]
# Shuffle once so different processes start at different pool positions,
# then round-robin forever with itertools.cycle.
random.shuffle(keys)
random.shuffle(azure_keys)

data_cycle = itertools.cycle(keys)
backup_data_cycle = itertools.cycle(backup_keys)
azure_data_cycle = itertools.cycle(azure_keys)


# Runtime switch for the module-wide default backend.
def change_default_llm(llm_model):
    """Set the module-level ``default_llm`` used by ``achat_auto_llm``.

    :param llm_model: backend identifier, e.g. ``"self"``, ``"qwen"``,
        ``"gpt"``, ``"azure"``, ``"ernie"``.
    """
    global default_llm
    default_llm = llm_model


def get_access_token():
    """Fetch a Baidu AIP OAuth access token via client-credentials grant.

    Returns the ``access_token`` string from the response JSON, or ``None``
    if the field is absent.  Raises ``requests`` exceptions on network
    failure — note this function is invoked at module import time, so a
    failure here aborts the import.

    NOTE(review): the client_id/client_secret are hard-coded in the URL;
    treat them as leaked and move them to configuration.
    """

    url = "https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=X8TlvTouC1rUTiMNxaaOlgoG&client_secret=7fA2Mzo1Lc5orBoK2oKglkmRu9RSAhe5"

    # The endpoint expects an (empty) JSON body.
    payload = json.dumps("")
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json'
    }

    # Bounded timeout: without it a slow endpoint hangs module import forever.
    response = requests.request("POST", url, headers=headers, data=payload,
                                timeout=30)
    return response.json().get("access_token")


ernie_token = get_access_token()


class LLMAgent(LLMAgentParent):
    """Multi-backend chat-completion client.

    An instance wraps one prompt and can send it to any of several
    backends: OpenAI (vision / streaming), a rotating pool of
    OpenAI-compatible proxy keys, a pool of Azure OpenAI deployments,
    Baidu ERNIE, and self-hosted FastChat workers.  Key/endpoint rotation
    state lives in the module-level ``itertools.cycle`` objects.
    """

    def __init__(self, prompt, gpt_prompt_content=None):
        """Build both prompt shapes used by the backends.

        :param prompt: plain-text prompt; also used as the system message
            when ``gpt_prompt_content`` is not given.
        :param gpt_prompt_content: optional override for the GPT system
            message.
        """
        super().__init__(prompt, gpt_prompt_content)
        if gpt_prompt_content is None:
            self.gpt_prompt_content = prompt
        else:
            self.gpt_prompt_content = gpt_prompt_content
        # OpenAI-style message list: a single system message.
        self.gpt_prompt = [
            {"role": "system", "content": self.gpt_prompt_content},
        ]
        # Vicuna-style flat prompt ("USER: ... ASSISTANT:") for the
        # self-hosted FastChat workers.
        self.model_prompt = "你是一个专业的助手，你需要基于用户的问题，给出准确的答案。USER: " + self.prompt + " ASSISTANT:"

    async def async_chat_with_openai(self, retry_count=0):
        """Describe a hard-coded local image with GPT-4 vision.

        Returns the model's text, or ``""`` on any error.
        NOTE(review): reads an absolute developer-machine path and ignores
        ``self.gpt_prompt`` in the actual request — looks like leftover
        experiment code; confirm before relying on it.
        """
        logger.info("gpt4_openai，the gpt prompt is: {}".format(self.gpt_prompt))
        # Read the image and base64-encode it for the data-URL payload.
        import base64
        with open("/Users/zengwenjia/Downloads/rBYA3GSjgrSAJ2TpAAy9f9cdl1Y965.jpg", "rb") as f:
            base64_data = base64.b64encode(f.read())
            base64_image = base64_data.decode('utf-8')

        try:
            response = await client.chat.completions.create(
                model="gpt-4-vision-preview",  # chat model name
                messages=[
                    {
                        "role": "user",
                        "content": [
                            {"type": "text", "text": "详细下图片中产品内容"},
                            {
                                "type": "image_url",
                                "image_url": {
                                    "url": f"data:image/jpeg;base64,{base64_image}"
                                },
                            },
                        ],
                    }
                ],
                max_tokens=2000,
            )
            result = response.choices[0].message.content
        except Exception as ee:
            try:
                logger.error("gpt4 error: {}".format(ee))
                # if retry_count < 1:
                #     return await self.achat_with_azure(retry_count + 1)
                result = ""
            except Exception:
                logger.error("gpt4 stream retry error: {}".format(ee))
                raise TimeoutError

        return result

    async def chat_with_openai_stream(self, retry_count=0):
        """Yield parsed GPT-4 stream chunks as dicts.

        NOTE(review): uses the legacy pre-1.0 ``openai.ChatCompletion``
        API while the module also constructs a 1.x ``AsyncOpenAI`` client —
        only one of the two can work for a given installed openai version;
        confirm which is intended.
        """
        logger.info("gpt4_openai, the gpt prompt is: {}".format(self.gpt_prompt))
        try:
            response = await openai.ChatCompletion.acreate(
                model="gpt-4",  # chat model name
                messages=self.gpt_prompt,
                top_p=1,
                temperature=0.9,
                max_tokens=1024,
                stream=True,
                request_timeout=100,
            )

            async for chunk in response:
                if chunk:
                    data = json.loads(str(chunk))
                    yield data

        except Exception as ee:
            logger.error("gpt4 stream error: {}".format(ee))

    async def achat_with_proxy(self, retry_count=0):
        """Non-streaming gpt-3.5-turbo call through the rotating proxy pool.

        Retries once with the next key on failure; returns ``""`` if both
        attempts fail.
        """
        logger.info("the gpt prompt is: {}".format(self.gpt_prompt))
        result = ""
        # Round-robin over (base_url, api_key) pairs.
        key_tuple = next(data_cycle)
        key = key_tuple[1]
        try:
            headers = {
                "Content-Type": "application/json",
                "Authorization": "Bearer " + key,
            }
            payload = {
                "model": "gpt-3.5-turbo",
                "messages": self.gpt_prompt,
                "temperature": 0.9,
                # "stream": True,
            }
            async with aiohttp.ClientSession() as session:
                response = await session.post(key_tuple[0] + "/v1/chat/completions",
                                              headers=headers, json=payload,
                                              timeout=aiohttp.ClientTimeout(total=100))
                content = await response.content.read()
            result = json.loads(content.decode())["choices"][0]['message']['content']
        except Exception as ee:
            logger.error("gpt proxy error: {}".format(ee))
            if retry_count < 1:
                return await self.achat_with_proxy(retry_count + 1)

        return result

    async def achat_with_proxy_gpt4(self, retry_count=0, save_data=True, save_path="llm"):
        """Non-streaming GPT-4 call through the proxy pool.

        Falls back to "gpt-4-0314" on the single retry.  Optionally
        persists the (prompt, result) pair as training data via the shared
        event loop's executor.  Returns ``""`` if both attempts fail.
        """
        # logger.info("the gpt prompt is: {}".format(self.gpt_prompt))
        # Session/message ids come from thread-local request context when
        # available; fall back to empty strings outside a request.
        try:
            session_id = local_data.session_id
            message_id = local_data.message_id
        except Exception:
            session_id = ""
            message_id = ""
        logger.info("the gpt prompt is: {}".format(self.gpt_prompt[0].get('content')))
        result = ""
        key_tuple = next(data_cycle)
        key = key_tuple[1]
        content = ""
        model_type = "gpt-4"
        if retry_count > 0:
            model_type = "gpt-4-0314"
        try:

            headers = {
                "Content-Type": "application/json",
                "Authorization": "Bearer " + key
            }
            payload = {
                "model": model_type,
                "messages": self.gpt_prompt,
                "temperature": 0.9,
                "max_tokens": 1024,
                # "request_timeout": 30
                # "stream": True,
            }
            async with aiohttp.ClientSession() as session:
                response = await session.post(key_tuple[0] + "/v1/chat/completions",
                                              headers=headers, json=payload,
                                              timeout=aiohttp.ClientTimeout(total=100))
                content = await response.content.read()
            result = json.loads(content.decode())["choices"][0]['message']['content']

            if save_data:
                # Fire-and-forget persistence off the event loop thread.
                loop.run_in_executor(None, self.save_train_data, self.prompt, result,
                                     self.__class__.__name__, save_path, "gpt-4",
                                     session_id + ":" + message_id)


        except Exception as ee:
            logger.error("the key is: {}".format(key))
            if content:
                logger.error("receive message is:" + content.decode())
            logger.exception("gpt proxy error: {}".format(ee))
            if retry_count < 1:
                return await self.achat_with_proxy_gpt4(retry_count + 1)

        return result


    async def achat_with_azure_gpt4(self, retry_count=0, save_data=True, save_path="llm"):
        """Non-streaming GPT-4 call through the rotating Azure deployments.

        Retries once with the next deployment; optionally persists training
        data.  Returns ``""`` if both attempts fail.
        """
        # logger.info("the gpt prompt is: {}".format(self.gpt_prompt))
        try:
            session_id = local_data.session_id
            message_id = local_data.message_id
        except Exception:
            session_id = ""
            message_id = ""
        logger.info("the gpt prompt is: {}".format(self.gpt_prompt[0].get('content')))
        result = ""
        # (endpoint, deployment_name, api_key) triple.
        key_tuple = next(azure_data_cycle)
        url = key_tuple[0]
        model = key_tuple[1]
        key = key_tuple[2]
        content = ""
        try:
            headers = {
                "Content-Type": "application/json",
                "api-key": key
            }
            payload = {
                "messages": self.gpt_prompt,
                "temperature": 0.9,
                "max_tokens": 1024,
                "model": "gpt-4-1106-preview",
                # "request_timeout": 30
                # "stream": True,
            }
            async with aiohttp.ClientSession() as session:
                response = await session.post(url + "openai/deployments/" + model + "/chat/completions?api-version=2023-05-15",
                                              headers=headers, json=payload,
                                              timeout=aiohttp.ClientTimeout(total=100))
                content = await response.content.read()
            result = json.loads(content.decode())["choices"][0]['message']['content']

            if save_data:
                loop.run_in_executor(None, self.save_train_data, self.prompt, result,
                                     self.__class__.__name__, save_path, "azure-gpt-4",
                                     session_id + ":" + message_id)

        except Exception as ee:
            logger.error("the azure key is: {}".format(key))
            if content:
                logger.error("receive message is:" + content.decode())
            logger.exception("azure gpt error: {}".format(ee))
            if retry_count < 1:
                return await self.achat_with_azure_gpt4(retry_count + 1)

        return result


    async def achat_with_proxy_ernie(self, save_data=True, save_path="ernie"):
        """Single-turn Baidu ERNIE call using the module-level token.

        No retry; returns ``""`` on any failure.
        NOTE(review): ``ernie_token`` is fetched once at import and never
        refreshed — confirm token lifetime covers process lifetime.
        """
        logger.info("the ernie prompt is: {}".format(self.prompt))
        result = ""
        try:
            url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions?access_token=" + ernie_token
            headers = {
                'Content-Type': 'application/json'
            }

            payload = json.dumps({
                "messages": [
                    {
                        "role": "user",
                        "content": self.prompt
                    }
                ],
                "stream": False,
            })

            async with aiohttp.ClientSession() as session:
                async with session.post(url, headers=headers, data=payload) as response:
                    response_data = await response.text()

                    result = json.loads(response_data)["result"]

            if save_data:
                # NOTE(review): this call passes fewer arguments than the
                # save_train_data calls elsewhere in this class — verify the
                # parent signature accepts this form.
                loop.run_in_executor(None, self.save_train_data, self.prompt, result, save_path)


        except Exception as ee:
            logger.exception("ernie proxy error: {}".format(ee))
        return result

    def parse_gpt_stream_response(self, response):
        """Parse one SSE line (bytes) from a chat-completions stream.

        Strips the ``data: `` prefix, treats ``data: [DONE]`` and blank
        lines as end-of-data (returns ``None``), and JSON-decodes the rest.
        A decoded list is returned as a generator over its items; a dict is
        returned as-is; anything else raises.
        """
        _line = None
        if response.strip() == b"data: [DONE]":
            response = None
        if response and response.strip():
            if response.startswith(b"data: "):
                response = response[len(b"data: "):]
            _line = response.decode("utf-8")

        if _line is not None:
            line_json = json.loads(_line)
            if isinstance(line_json, list):
                return (line_content for line_content in line_json)
            elif isinstance(line_json, dict):
                return line_json
            else:
                raise Exception("gpt stream error: {}".format(_line))


    async def achat_with_proxy_stream_gpt4(self):
        """Stream GPT-4 chunks through the proxy pool.

        On any failure mid-stream, retries once with the next key and the
        "gpt-4-0314" model; yields parsed chunk dicts.
        """
        logger.info("the gpt prompt is: {}".format(self.gpt_prompt))
        key_tuple = next(data_cycle)
        key = key_tuple[1]
        headers = {
            "Content-Type": "application/json",
            "Authorization": "Bearer " + key
        }
        payload = {
            "model": "gpt-4",
            "messages": self.gpt_prompt,
            "stream": True,
            "temperature": 0.9,
            "max_tokens": 2048,
        }
        try:
            content = ""

            async with aiohttp.ClientSession() as session:
                async with session.post(key_tuple[0] + "/v1/chat/completions",
                                        headers=headers, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=100)) as response:
                    async for line in response.content:
                        result = self.parse_gpt_stream_response(line)
                        if result:
                            yield result



        except Exception as ee:
            # Log the key that failed before rotating to the next one.
            logger.error("the key is: {}".format(key))
            logger.exception("gpt4 stream error: {}".format(ee))
            try:
                key_tuple = next(data_cycle)
                key = key_tuple[1]
                headers = {
                    "Content-Type": "application/json",
                    "Authorization": "Bearer " + key
                }
                payload = {
                    "model": "gpt-4-0314",
                    "messages": self.gpt_prompt,
                    "stream": True,
                    "temperature": 0.9,
                    "max_tokens": 2048,
                }
                async with aiohttp.ClientSession() as session:
                    async with session.post(key_tuple[0] + "/v1/chat/completions",
                                            headers=headers, json=payload,
                                            timeout=aiohttp.ClientTimeout(total=100)) as response:
                        async for line in response.content:
                            result = self.parse_gpt_stream_response(line)
                            if result:
                                yield result
            except Exception as ee:
                logger.error("the key is: {}".format(key))
                logger.exception("gpt4-0314 stream again error: {}".format(ee))


    def chat_with_azure(self, retry_count=0):
        """Synchronous gpt-3.5 call against a fixed Azure deployment.

        Retries once, then returns ``""``.
        NOTE(review): the inner except clauses guard only the logger/retry
        lines, so the openai.error handlers are unlikely to ever fire as
        written — confirm intent.
        """
        logger.info("gpt3_5_azure，the gpt prompt is: {}".format(self.gpt_prompt))
        try:
            response = openai.ChatCompletion.create(
                api_type="azure",
                api_version="2023-03-15-preview",
                api_base="https://lingxi-openai.openai.azure.com",
                api_key="45a5ee249f364e208dd950f87ab5aba7",
                engine="gpt-35",
                messages=self.gpt_prompt,
                temperature=0.7,
                max_tokens=1024,
                request_timeout=15,
            )
            result = response["choices"][0]['message']['content']
        except Exception as ee:
            try:
                logger.error("gpt3.5 azure error: {}".format(ee))
                if retry_count < 1:
                    return self.chat_with_azure(retry_count + 1)
                result = ""
            except openai.error.InvalidRequestError:
                raise openai.error.InvalidRequestError("token limit", None)
            except openai.error.Timeout:
                raise TimeoutError
            except Exception:
                logger.error("gpt3.5 stream retry error: {}".format(ee))
                return ""

        return result


    async def achat_with_azure_stream_gpt4(self):
        """Stream GPT-4 chunks through the rotating Azure deployments.

        On failure, retries once with the next deployment (second payload
        omits the "model" field); yields parsed chunk dicts.
        """
        logger.info("the gpt prompt is: {}".format(self.gpt_prompt))
        key_tuple = next(azure_data_cycle)
        url = key_tuple[0]
        model = key_tuple[1]
        key = key_tuple[2]
        headers = {
            "Content-Type": "application/json",
            "api-key": key
        }
        payload = {
            "messages": self.gpt_prompt,
            "stream": True,
            "temperature": 0.9,
            "model": "gpt-4-1106-preview",
            "max_tokens": 2048,
        }
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post(url + "openai/deployments/" + model + "/chat/completions?api-version=2023-05-15",
                                        headers=headers, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=100)) as response:
                    async for line in response.content:
                        result = self.parse_gpt_stream_response(line)
                        if result:
                            yield result
        except Exception as ee:
            # Log the deployment key that failed before rotating.
            logger.error("the azure key is: {}".format(key))
            logger.exception("azure gpt4 stream error: {}".format(ee))
            try:
                key_tuple = next(azure_data_cycle)
                url = key_tuple[0]
                model = key_tuple[1]
                key = key_tuple[2]
                headers = {
                    "Content-Type": "application/json",
                    "api-key": key
                }
                payload = {
                    "messages": self.gpt_prompt,
                    "stream": True,
                    "temperature": 0.9,
                    "max_tokens": 2048,
                }
                async with aiohttp.ClientSession() as session:
                    async with session.post(url + "openai/deployments/" + model + "/chat/completions?api-version=2023-05-15",
                                            headers=headers, json=payload,
                                            timeout=aiohttp.ClientTimeout(total=100)) as response:
                        async for line in response.content:
                            result = self.parse_gpt_stream_response(line)
                            if result:
                                yield result
            except Exception as ee:
                logger.error("the azure key is: {}".format(key))
                logger.exception("azure gpt4 stream again error: {}".format(ee))


    async def achat_self_llm(self, model_name="m_2023_11_29_model", save_data=True, save_path="llm"):
        """Non-streaming completion against the self-hosted vLLM endpoint.

        Sends the system-message text as a raw completion prompt; returns
        ``""`` on any failure (no retry).
        """
        try:
            session_id = local_data.session_id
            message_id = local_data.message_id
        except Exception:
            session_id = ""
            message_id = ""
        logger.info("the gpt prompt is: {}".format(self.gpt_prompt[0].get('content')))
        result = ""
        content = ""
        try:
            headers = {
                "Content-Type": "application/json",
            }
            payload = {
                "prompt": str(self.gpt_prompt[0]['content']).strip(),
                "model": model_name,
                "temperature": 0.7,
                # NOTE(review): 41 is an unusually small completion budget —
                # confirm it is intentional for this model.
                "max_tokens": 41,

            }
            async with aiohttp.ClientSession() as session:
                response = await session.post("http://180.184.70.219:8000/v1/completions",
                                              headers=headers, json=payload,
                                              timeout=aiohttp.ClientTimeout(total=100))
                content = await response.content.read()
            result = json.loads(content.decode())["choices"][0]['text']

            if save_data:
                # NOTE(review): model tag recorded as "azure-gpt-4" even though
                # this is the self-hosted model — likely copy-paste; confirm.
                loop.run_in_executor(None, self.save_train_data, self.prompt, result,
                                     self.__class__.__name__, save_path, "azure-gpt-4",
                                     session_id + ":" + message_id)

        except Exception as ee:
            if content:
                logger.error("receive message is:" + content.decode())
            logger.exception("self gpt error: {}".format(ee))

        return result

    async def achat_self_llm_stream(self, model_name="planner_llama-2_13b_new"):
        """Stream NUL-delimited JSON chunks from a FastChat worker.

        On failure, retries once with the fallback model name; yields
        decoded chunk dicts.
        """
        logger.info("the llm prompt is :{}".format(self.prompt))
        # NOTE(review): this local shadows the module-level default_llm and is
        # only used for the fallback payload below — confirm intent.
        default_llm = "planner_llama-2_13b_new"
        payload = {
            "model": model_name,
            "prompt": self.model_prompt,
            "temperature": 0.9,
            "max_new_tokens": 1024,
            'repetition_penalty': 0.5,
            # 'top_p': 0.9,
            # 'top_k': 1,
            "stop": '</s>',
            "stop_token_ids": None,
            "echo": False,
        }
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + llm_host + ":21001" + "/worker_generate_stream",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=100)) as response:

                    # An empty delimiter is not supported by aiohttp, so we'll read chunk by chunk
                    buffer = b''
                    async for line in response.content.iter_any():
                        buffer += line
                        while b'\0' in buffer:
                            chunk, buffer = buffer.split(b'\0', 1)
                            if chunk:
                                data = json.loads(chunk.decode())
                                yield data
        except Exception as ee:
            logger.error("llm stream error: {}".format(ee))
            payload = {
                "model": default_llm,
                "prompt": self.model_prompt,
                "temperature": 0.9,
                "max_new_tokens": 1024,
                'repetition_penalty': 0.5,
                # 'top_p': 0.9,
                # 'top_k': 1,
                "stop": '</s>',
                "stop_token_ids": None,
                "echo": False,
            }
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + llm_host + ":21001" + "/worker_generate_stream",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=100)) as response:

                    # An empty delimiter is not supported by aiohttp, so we'll read chunk by chunk
                    buffer = b''
                    async for line in response.content.iter_any():
                        buffer += line
                        while b'\0' in buffer:
                            chunk, buffer = buffer.split(b'\0', 1)
                            if chunk:
                                data = json.loads(chunk.decode())
                                yield data

    async def achat_qwen_llm(self, model_name="Qwen-7B-Chat"):
        """Drain the Qwen stream and return the final chunk's text.

        NOTE(review): if the stream yields nothing, ``x`` is unbound and
        this raises NameError — confirm the worker always emits at least
        one chunk.
        """
        async for x in self.achat_qwen_llm_stream(model_name):
            # logger.info("the llm response is :{}".format(x["text"]))
            pass

        return x["text"]

    async def achat_qwen_llm_stream(self, model_name="Qwen-7B-Chat"):
        """Stream NUL-delimited JSON chunks from a Qwen FastChat worker.

        Uses Qwen's chat-ml separators/stop tokens; retries once with the
        same payload shape on failure.
        """
        logger.info("the qwen llm prompt is :{}".format(self.prompt))
        # NOTE(review): this local shadows the module-level default_llm and is
        # never read below — confirm it can be removed.
        default_llm = "Qwen-7B-Chat"
        payload = {
            "model": model_name,
            "prompt": self.model_prompt,
            "temperature": 0.9,
            "max_new_tokens": 1024,
            'repetition_penalty': 0.5,
            # 'top_p': 0.9,
            # 'top_k': 1,
            "sep": "<|im_end|>",
            "stop_token_ids": [
                151643,
                151644,
                151645,
            ],  # "<|endoftext|>", "<|im_start|>", "<|im_end|>"
            "stop_str": "<|endoftext|>",
            "echo": False,
        }
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + llm_host + ":21001" + "/worker_generate_stream",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=100)) as response:

                    # An empty delimiter is not supported by aiohttp, so we'll read chunk by chunk
                    buffer = b''
                    async for line in response.content.iter_any():
                        buffer += line
                        while b'\0' in buffer:
                            chunk, buffer = buffer.split(b'\0', 1)
                            if chunk:
                                data = json.loads(chunk.decode())
                                yield data
        except Exception as ee:
            logger.error("llm stream error: {}".format(ee))
            payload = {
                "model": model_name,
                "prompt": self.model_prompt,
                "temperature": 0.9,
                "max_new_tokens": 1024,
                'repetition_penalty': 0.5,
                # 'top_p': 0.9,
                # 'top_k': 1,
                "sep": "<|im_end|>",
                "stop_token_ids": [
                    151643,
                    151644,
                    151645,
                ],  # "<|endoftext|>", "<|im_start|>", "<|im_end|>"
                "stop_str": "<|endoftext|>",
                "echo": False,
            }
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + llm_host + ":21001" + "/worker_generate_stream",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=100)) as response:

                    # An empty delimiter is not supported by aiohttp, so we'll read chunk by chunk
                    buffer = b''
                    async for line in response.content.iter_any():
                        buffer += line
                        while b'\0' in buffer:
                            chunk, buffer = buffer.split(b'\0', 1)
                            if chunk:
                                data = json.loads(chunk.decode())
                                yield data

    async def achat_auto_llm(self, is_stream=False, type="auto"):
        """Dispatch to a backend by name.

        :param is_stream: when True, returns an async generator; otherwise
            awaits and returns the final text.
        :param type: backend name ("auto" resolves to the module-level
            default_llm); unknown names fall through to the self-hosted
            model with ``type`` as the model name.
            NOTE(review): parameter shadows the ``type`` builtin — renaming
            would change the keyword interface for callers.
        """
        if type == "auto":
            type = default_llm
        if type == "self" and is_stream:
            return self.achat_self_llm_stream()
        elif type == "self" and not is_stream:
            return await self.achat_self_llm()
        elif type == 'qwen' and is_stream:
            return self.achat_qwen_llm_stream()
        elif type == "qwen" and not is_stream:
            return await self.achat_qwen_llm()
        elif type == "gpt" and is_stream:
            return self.achat_with_proxy_stream_gpt4()
        elif type == "gpt" and not is_stream:
            return await self.achat_with_proxy_gpt4()
        elif type == "azure" and is_stream:
            return self.achat_with_azure_stream_gpt4()
        elif type == "azure" and not is_stream:
            return await self.achat_with_azure_gpt4()
        elif type == "ernie" and not is_stream:
            return await self.achat_with_proxy_ernie()
        elif type == "baichuan_planner" and is_stream:
            return self.achat_self_llm_stream("baichuan_planner")
        elif type == "baichuan_planner" and not is_stream:
            return await self.achat_self_llm("baichuan_planner")
        elif is_stream:
            return self.achat_self_llm_stream(type)
        else:
            return await self.achat_self_llm(type)


if __name__ == "__main__":
    # Manual smoke test: summarize a user's concern from a sales-call
    # transcript via the Azure GPT-4 backend.  Requires network access and
    # valid keys; not a unit test.
    import asyncio

    # Prompt (Chinese): classify the user's current question/objection from a
    # loan-sales phone-call transcript; must stay verbatim — it is runtime data.
    content = """
            作为一个专业的、非常懂营销的信贷领域的销售员，你正通过电话和用户沟通，你需要根据当前的对话记录用简洁的一句短语总结用户的当前想要解决的问题或疑议。
需要注意以下要求：
1.如果用户表达是一句完整的话，不需要联系上文的销售员说的话，如果不是完整的，需要联系上文销售员说的话。
2.用户问题包括：非本人、询问什么平台、是否会查征信、利息多少，还款方式、提前还款、下款时间，不满投诉、投诉倾向、别给我打电话了，怎么有我的号码，询问工号，静音未回复，怎么注销，已经注销，打电话目的，不愿意参与兜底降息、提额、优惠券等活动，不会操作，通话中没有网络，为什么要操作，咨询京东其他产品，操作麻烦，不愿意加微，咨询兜底降息、提额、优惠券等产品/活动信息，是否有销售任务，挂机后自己操作，多次不需要，多次在忙等等；
3.如果用户当前没有问题，则输出"用户无问题"，不要无中生有。
4.用户表示已经提额成功或借款成功，表示用户无问题。
5.用户表述额度相关问题，询问额度多少，表示额度低，想提升额度，用户问题分别是额度多少，额度低，如何提升额度。
6.用户表示不能用，额度不能用，无法提供服务，综合评分不足等，用户问题是无法正常借款。
7.用户表示没有额度了、额度用完了等，用户问题是额度已用完。
8.用户询问利息多少，想降低利率，用户问题分别是利息多少，如何降低利率。
9.用户想要息费减免券、折扣券、优惠券、打折券、降低利率利息、觉得利息高、有利率不满问题等，用户问题是利率高。
10.用户表示是智能助手，语音助手，电话助理，请录音，录音超时等时，用户其实并非真人而是AI回复，这时用户问题是并非真人。
11.用户表示稍后查看，之后操作，之后会考虑，有需要再说等，用户问题是会考虑。用户肯定回应算是用户无问题。
12.用户表示会使用APP，会操作APP，知道操作流程，用户问题是自己会操作。
13.用户表示在忙，开车，上班等没有时间进行沟通时，用户问题是在忙。
14.用户未明白销售员的表述含义，或者希望销售员简明点表述，用户问题是没听懂。
15.用户想打断销售员、用户要求销售员暂停，用户问题是打断。
16.用户询问怎么去操作APP，怎么去查看，用户问题是如何操作。
17.用户询问去哪操作，去哪查看，用户问题是申请渠道。
18.用户表述不需要，不用等拒绝含义，用户问题是不需要。用户可能会用不缺钱、使用其他平台等来表示自己不需要，这时应返回"不需要-不缺钱"、"不需要-使用其他平台"。用户一句话中超过4次充分表述不需要，不用，用户问题是强烈拒绝。
19.用户询问免息券，优惠券的作用是什么，用户问题是：优惠券作用。免息券，优惠券使用期限，用户问题是：优惠券期限。
20.用户询问如何去查看免息券，优惠券，用户问题是如何查看优惠券。用户询问如何去使用免息券，优惠券，用户问题是如何使用优惠券。
21.用户担心借了之后还不起，借了之后造成信用损失，用户问题是还不起。
22.用户只有明确否定是本人，用户问题才是非本人。在核身阶段单纯的"你说"等，应是无问题，询问"你哪位"等是"询问什么平台"
23.用户质疑打电话的合法性、投诉到公检法、警察或者用户主动表达自己是记者、警察等都是表明用户的问题是投诉或投诉倾向。
===
销售员:喂您好，请问您是李先生吗？
用户:是的
销售员:您好，我是京东金融的客户经理哈，工号95266，然后本次来电是为了通知到您，咱们金条借款是给你提了额度的，本次提额也只回馈了极少部分优质老客户，您最近有关注到吗？
用户:没有
销售员:那好呢这次的活动确实也是比较难得，提升的额度也是为了方便您后续使用的，本次可以全额使用，之后还有机会再次调整额度费率的，具体以系统审批为准，您可以先看一下好吧？
用户:你打这个电话符合法律吗？
===
结合对话记录和注意要求，用一句短语简洁总结用户当前的问题（如果有多个只返回最需要解决的一个），用户问题是：
    """
    # Collapse triple-space runs left by the source indentation.
    content = content.replace("   ", "")
    for i in range(1):
        chat = LLMAgent(content)
        logger.info("start")
        # result = asyncio.run(chat.async_chat_with_openai())
        result = asyncio.run(chat.achat_auto_llm(type="azure"))
        print(result)
        # result = asyncio.run(chat.async_chat_with_openai())
        # for data in result:
        #     logger.info(data)
        logger.info(result)
