# -*- coding:utf-8 -*-
import time

# @Time    : 2023/5/10 14:30
# @Author  : zengwenjia
# @Email   : zengwenjia@lingxi.ai
# @Software: LLM_internal

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

import requests
import json

from agent.llm_agent_parent import LLMAgentParent
from bot.chatgpt.chat_gpt_bot import ChatGPTBot
# import config
from common.log import logger
import aiohttp
import asyncio
import itertools
import random
import uuid
from util_tool import utils
import os
import datetime
import threading
import settings

# Select LLM worker hosts depending on the deployment environment.
if settings.ONLINE:
    llm_host = "172.30.0.22"
    llm_host_test = '172.26.4.253'
    default_llm = "self"
else:
    # BUGFIX(dead stores): these hostname values were assigned and then
    # immediately overwritten by the raw IPs below; kept as comments so the
    # named endpoints are not lost.
    # llm_host = "socket.chatlingxi.com"
    # llm_host_test = 'socket.chatlingxi.com'
    llm_host = "116.196.97.15"
    llm_host_test = '8.142.69.133'
    default_llm = "self"
logger.info('llm host:{}'.format(llm_host))
# config.load_config()
import copy

# Module-level lock and event loop shared by background tasks (e.g. the
# fire-and-forget training-data save in LLMAgent).
lock = threading.Lock()
try:
    # NOTE(review): calling get_event_loop() without a running loop is
    # deprecated in newer Python; this grabs (or creates) the main-thread
    # loop at import time — confirm this module is always imported from the
    # main thread.
    loop = asyncio.get_event_loop()
except RuntimeError:
    asyncio.set_event_loop(asyncio.new_event_loop())
    loop = asyncio.get_event_loop()

# SECURITY(review): live API keys are hard-coded in source control. They
# should be moved to a secrets store / environment variables and the keys
# below revoked and rotated.
keys = [

    ("https://api.chatanywhere.com.cn/", "sk-fPabEZoZhYONpJKebMh8hO2Cf4BtcmVIzzoNtMrhN4a4PN16"),
    ("https://api.chatanywhere.com.cn/", "sk-hPsGy4D9kGkV2APuR8OdOUkYQr5XQjSr7mY2SADDJ95ABP0p"),
    ("https://api.chatanywhere.com.cn/", "sk-ENeLI2bhHCydqg4ErWoq6teG4Hu3KzmwCoyoZmdG9D5HbfxY"),
    ("https://api.chatanywhere.com.cn/", "sk-sU1EowMgsVd6Or6RmNzuJQNdmQdCl26i45HRk8akiS7RSNy8"),
    ("https://api.chatanywhere.com.cn/", "sk-9Lp9W6nuFsmfFsCGNaIpXNRi8KSI4qzSUGmGVG0zxTRgLHJU"),
]

train_keys = [
    ("https://api.chatanywhere.com.cn/", "sk-wWZiRMreyhtnwGm6tckAsSb3hJR5YYmVyFQTRzKTWNwQZmXi"),
    # ("https://api.chatanywhere.com.cn/", "sk-pRq1Rn0OiOo2yM8rK2Dt1FhQl4uoWkzku8dDuJ2Y2FF8mIXo"),
    ("https://api.chatanywhere.com.cn/", "sk-Wf1bsQQ1jYX1koWw7uUCH5HSDWJUi6OR8Xx8YXmWyVCv8sk0"),
    # ("https://api.chatanywhere.com.cn/", "sk-BWhs8Hn3bz2C9eqlk2ZjI6oXC4n33whMPWrFXis4suryxV77"),
    ("https://api.chatanywhere.com.cn/", "sk-xuqalfD6UNQbkb0RfVUjNbXbd1OosvOrc7R2uvud42gTPibN"),
    # ("https://api.chatanywhere.com.cn/", "sk-mAk7ME8MbmYt4zs76cKGIKjxsgFHEgPyejGJjfDf3hOBoGLP"),
    ("https://api.chatanywhere.com.cn/", "sk-zM6x5IUg0q6u3gtGc4WLxlkiwrKnumqS83JiWVDsXRaW1PLj"),
    ("https://api.chatanywhere.com.cn/", "sk-Vf6Y5ZrW7nDCzEs7bv3a0jJ4RUGE1Z4fly6hJLqPok1PlEBF"),
    # ("https://api.chatanywhere.com.cn/", "sk-CfQl1kVdPM13JiX6q6l9XAcM3O6aLhkNz2A0OBq0pusEHNck"),
    ("https://api.chatanywhere.com.cn/", "sk-FdXROLxHzyxMhKXY5QzhLwh4IRlTTEL4x7sPk9gCCmbicOdu"),
    # ("https://api.chatanywhere.com.cn/", "sk-SExov0P7oCH9QaWPND3aa7W7hN8M4ZDsmgFkVFTczJDvUZTs"),
    ("https://api.chatanywhere.com.cn/", "sk-znEgLNyNdYShdo81hsXwGljGmD0AONv4iMBHbxZqbBVkwxWV"),
    ("https://api.chatanywhere.com.cn/", "sk-njfT1tzFaEaUrxCoUaYqz66rRSLlJKW5fZU5WM7n2OLtKnke"),
    # ("https://api.chatanywhere.com.cn/", "sk-5AwmTKGWaUEARvD3xWzihqDDey3HrTYBFTXMFxRvd26HUlaG"),
    ("https://api.chatanywhere.com.cn/", "sk-tpXAB0WyMjwhxhkpXqPQy38HsGoU13xmQFLT8XFFsPOyzH5A"),
    ("https://api.chatanywhere.com.cn/", "sk-lM4eNRqXXi34cniqBRZ2Kq5Os3ZMPIikFeIyrg1ISmzdF5po"),
    ("https://api.chatanywhere.com.cn/", "sk-kMS9VEWCE5eRvLJpk7QyjYQdxJ7OJnG3opu4VMYgEYBvUTNC"),
    # ("https://api.chatanywhere.com.cn/", "sk-e4fhhETc254w2MOVmjLJYR8cvevYpsvupaKL2JJQE7msCmqI"),
    ("https://api.chatanywhere.com.cn/", "sk-hCgQFaAB1BHA3NX4ocybDoNWbLsUr9RlCvfyP715s0fIi56s"),
    ("https://api.chatanywhere.com.cn/", "sk-7GHAuvERby2d1HbcObyPriyQNlpK20LecH17wfVezux2G9cs"),
]

# NOTE(review): this rebinding makes the first `keys` list above dead code —
# only `train_keys` is ever used by the cycle below.
keys = train_keys

backup_keys = [("https://api.chatlingxi.com", "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")]
# `keys` aliases `train_keys`, so this shuffles `train_keys` in place as well.
random.shuffle(keys)

# Round-robin iterators over the key pools; `next(data_cycle)` rotates keys
# across requests.
data_cycle = itertools.cycle(keys)
backup_data_cycle = itertools.cycle(backup_keys)


# Switch the module-level default LLM backend at runtime.
def change_default_llm(llm_model):
    """Set the module-wide ``default_llm`` to *llm_model* (e.g. "self", "gpt")."""
    global default_llm
    default_llm = llm_model


def get_access_token():
    """Fetch a Baidu ERNIE OAuth access_token via the client-credentials flow.

    Returns:
        The ``access_token`` string from the response, or None if absent.

    SECURITY(review): the client_id / client_secret are hard-coded in the URL
    below; they should be moved to configuration and rotated.
    """
    url = "https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=X8TlvTouC1rUTiMNxaaOlgoG&client_secret=7fA2Mzo1Lc5orBoK2oKglkmRu9RSAhe5"
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json',
    }
    # Body is the JSON-encoded empty string, matching the original request.
    response = requests.post(url, headers=headers, data=json.dumps(""))
    return response.json().get("access_token")


# Fetched once at import time. NOTE(review): this performs a blocking network
# call on module import, and the token is never refreshed after expiry.
ernie_token = get_access_token()


class LLMAgent(LLMAgentParent):
    """Routes a prompt either to GPT proxy endpoints (via a rotating key pool)
    or to self-hosted FastChat worker models.

    The GPT path sends ``self.gpt_prompt`` (a system-role message list); the
    self-hosted path sends ``self.model_prompt`` (a USER/ASSISTANT template).
    """

    def __init__(self, prompt, gpt_prompt_content=None):
        """Build both prompt variants.

        Args:
            prompt: the raw prompt text.
            gpt_prompt_content: optional system-prompt override for the GPT
                endpoints; when None, ``prompt`` is reused.
        """
        super().__init__(prompt, gpt_prompt_content)
        if gpt_prompt_content is None:
            self.gpt_prompt_content = prompt
        else:
            self.gpt_prompt_content = gpt_prompt_content
        self.gpt_prompt = [
            {"role": "system", "content": self.gpt_prompt_content},
        ]
        # USER/ASSISTANT template consumed by the FastChat workers.
        self.model_prompt = "你是一个专业的助手，你需要基于用户的问题，给出准确的答案。USER: " + self.prompt + " ASSISTANT:"

    # ------------------------------------------------------------------ #
    # internal helpers
    # ------------------------------------------------------------------ #

    def _worker_payload(self, model_name):
        """Common request body for the FastChat worker endpoints."""
        return {
            "model": model_name,
            "prompt": self.model_prompt,
            "temperature": 0.3,
            "max_new_tokens": 128,
            "stop": '</s>',
            "stop_token_ids": None,
            "echo": False,
        }

    async def _aworker_stream(self, url, payload):
        """POST *payload* to a FastChat worker stream endpoint and yield each
        NUL-delimited JSON chunk as a dict.

        aiohttp cannot split on an empty delimiter, so chunks are buffered
        manually and split on the NUL byte.
        """
        async with aiohttp.ClientSession() as session:
            async with session.post(url,
                                    headers={"User-Agent": "FastChat Client"}, json=payload,
                                    timeout=aiohttp.ClientTimeout(total=10)) as response:
                buffer = b''
                async for piece in response.content.iter_any():
                    buffer += piece
                    while b'\0' in buffer:
                        chunk, buffer = buffer.split(b'\0', 1)
                        if chunk:
                            yield json.loads(chunk.decode())

    async def _agpt_stream(self, base_url, headers, payload):
        """POST a streaming chat-completion request and yield parsed chunks."""
        async with aiohttp.ClientSession() as session:
            async with session.post(base_url + "/v1/chat/completions",
                                    headers=headers, json=payload,
                                    timeout=aiohttp.ClientTimeout(total=100)) as response:
                async for line in response.content:
                    parsed = self.parse_gpt_stream_response(line)
                    if parsed:
                        yield parsed

    # ------------------------------------------------------------------ #
    # GPT proxy endpoints
    # ------------------------------------------------------------------ #

    async def achat_with_proxy(self, retry_count=0):
        """Non-streaming gpt-3.5-turbo call through the rotating key pool.

        Returns the completion text, or "" when both attempts fail.
        """
        logger.info("the gpt prompt is: {}".format(self.gpt_prompt))
        result = ""
        key_tuple = next(data_cycle)
        key = key_tuple[1]
        try:
            headers = {
                "Content-Type": "application/json",
                "Authorization": "Bearer " + key,
            }
            payload = {
                "model": "gpt-3.5-turbo",
                "messages": self.gpt_prompt,
                "temperature": 0.9,
            }
            async with aiohttp.ClientSession() as session:
                response = await session.post(key_tuple[0] + "/v1/chat/completions",
                                              headers=headers, json=payload,
                                              timeout=aiohttp.ClientTimeout(total=100))
                content = await response.content.read()
            result = json.loads(content.decode())["choices"][0]['message']['content']
        except Exception as ee:
            logger.error("gpt proxy error: {}".format(ee))
            if retry_count < 1:
                return await self.achat_with_proxy(retry_count + 1)

        return result

    async def achat_with_proxy_gpt4(self, retry_count=0, save_data=True, save_path="llm"):
        """Non-streaming GPT-4 call; optionally archives (prompt, result) as
        training data in the background.

        Args:
            retry_count: internal recursion guard (one retry max).
            save_data: when True, persist the exchange via save_train_data.
            save_path: sub-directory label for the saved training data.

        Returns the completion text, or "" when both attempts fail.
        """
        logger.info("the gpt prompt is: {}".format(self.gpt_prompt[0].get('content')))
        result = ""
        key_tuple = next(data_cycle)
        key = key_tuple[1]
        content = ""
        model_type = "gpt-4-1106-preview"
        try:
            headers = {
                "Content-Type": "application/json",
                "Authorization": "Bearer " + key
            }
            payload = {
                "model": model_type,
                "messages": self.gpt_prompt,
                "temperature": 0.9,
                "max_tokens": 1024,
            }
            async with aiohttp.ClientSession() as session:
                response = await session.post(key_tuple[0] + "/v1/chat/completions",
                                              headers=headers, json=payload,
                                              timeout=aiohttp.ClientTimeout(total=100))
                content = await response.content.read()
            result = json.loads(content.decode())["choices"][0]['message']['content']

            if save_data:
                # Fire-and-forget: archive the exchange without blocking the caller.
                loop.run_in_executor(None, self.save_train_data, self.prompt, result,
                                     self.__class__.__name__, save_path, )
        except Exception as ee:
            logger.error("the key is: {}".format(key))
            if content:
                logger.error("receive message is:" + content.decode())
            logger.exception("gpt proxy error: {}".format(ee))
            if retry_count < 1:
                # BUGFIX: forward save_data/save_path on retry (they were
                # previously reset to their defaults).
                return await self.achat_with_proxy_gpt4(retry_count + 1, save_data, save_path)

        return result

    def parse_gpt_stream_response(self, response):
        """Parse one SSE line (bytes) from the GPT streaming API.

        Returns:
            A dict for a normal chunk, a generator when the server sends a
            JSON list, or None for the "data: [DONE]" terminator / blank lines.

        Raises:
            Exception: when the payload is neither a list nor a dict.
        """
        _line = None
        if response.strip() == b"data: [DONE]":
            response = None
        if response and response.strip():
            if response.startswith(b"data: "):
                response = response[len(b"data: "):]
            _line = response.decode("utf-8")

        if _line is not None:
            line_json = json.loads(_line)
            if isinstance(line_json, list):
                return (line_content for line_content in line_json)
            elif isinstance(line_json, dict):
                return line_json
            else:
                raise Exception("gpt stream error: {}".format(_line))

    async def achat_with_proxy_stream_gpt4(self):
        """Streaming GPT-4 call.

        On failure, retries once with gpt-4-0314 and a fresh key; chunks
        already yielded before the failure are not replayed.
        """
        logger.info("the gpt prompt is: {}".format(self.gpt_prompt))
        key_tuple = next(data_cycle)
        key = key_tuple[1]
        headers = {
            "Content-Type": "application/json",
            "Authorization": "Bearer " + key
        }
        payload = {
            "model": "gpt-4",
            "messages": self.gpt_prompt,
            "stream": True,
            "temperature": 0.9,
            "max_tokens": 2048,
        }
        try:
            async for item in self._agpt_stream(key_tuple[0], headers, payload):
                yield item
        except Exception as ee:
            logger.error("the key is: {}".format(key))
            logger.exception("gpt4 stream error: {}".format(ee))
            try:
                key_tuple = next(data_cycle)
                key = key_tuple[1]
                headers = {
                    "Content-Type": "application/json",
                    "Authorization": "Bearer " + key
                }
                payload = {
                    "model": "gpt-4-0314",
                    "messages": self.gpt_prompt,
                    "stream": True,
                    "temperature": 0.9,
                    "max_tokens": 2048,
                }
                async for item in self._agpt_stream(key_tuple[0], headers, payload):
                    yield item
            except Exception as ee:
                logger.error("the key is: {}".format(key))
                logger.exception("gpt4-0314 stream again error: {}".format(ee))

    # ------------------------------------------------------------------ #
    # self-hosted FastChat workers
    # ------------------------------------------------------------------ #

    async def achat_self_llm(self, model_name="financial_loan_7b", retry_count=0):
        """Non-streaming call to the worker on port 21001.

        Returns the generated text, or "" when both attempts fail.
        """
        payload = self._worker_payload(model_name)
        result = ""   # BUGFIX: was unbound at `return` when both attempts failed
        content = b""  # BUGFIX: was unbound in the except block on early failure
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + llm_host + ":21001" + "/worker_generate",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=10)) as response:
                    content = await response.content.read()
            result = json.loads(content.decode())["text"]
        except Exception as ee:
            logger.error("llm stream error: {}".format(ee))
            if content:
                logger.error("receive message is:" + content.decode())
            logger.exception("llm proxy error: {}".format(ee))
            if retry_count < 1:
                return await self.achat_self_llm(model_name, retry_count + 1)

        return result

    async def achat_self_llm_financial_loan_7b_b(self, model_name="financial_loan_7b_b", retry_count=0):
        """Non-streaming call to the worker on port 31001 (same contract as
        achat_self_llm)."""
        payload = self._worker_payload(model_name)
        result = ""   # BUGFIX: was unbound at `return` when both attempts failed
        content = b""  # BUGFIX: was unbound in the except block on early failure
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + llm_host + ":31001" + "/worker_generate",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=10)) as response:
                    content = await response.content.read()
            result = json.loads(content.decode())["text"]
        except Exception as ee:
            logger.error("llm stream error: {}".format(ee))
            if content:
                logger.error("receive message is:" + content.decode())
            logger.exception("llm proxy error: {}".format(ee))
            if retry_count < 1:
                return await self.achat_self_llm_financial_loan_7b_b(model_name, retry_count + 1)

        return result

    async def achat_self_llm_test(self, model_name):
        """Drain the test-host stream and return the last chunk's text."""
        result = ""
        last = None  # BUGFIX: loop variable was unbound when the stream yielded nothing
        async for last in self.achat_self_llm_test_stream(model_name):
            pass
        if last and 'text' in last:
            result = last["text"]
        return result

    async def achat_self_llm_test_stream(self, model_name):
        """Stream from the test worker host (port 21001).

        On failure the whole request is retried once; chunks yielded before
        the failure may therefore repeat.
        """
        logger.info("the llm prompt is :{}".format(self.prompt))
        payload = self._worker_payload(model_name)
        url = "http://" + llm_host_test + ":21001" + "/worker_generate_stream"
        try:
            async for data in self._aworker_stream(url, payload):
                yield data
        except Exception as ee:
            logger.error("llm stream error: {}".format(ee))
            async for data in self._aworker_stream(url, payload):
                yield data

    async def achat_self_llm_financial_first_loan(self, model_name):
        """Drain the first-loan stream and return the last chunk's text."""
        result = ""
        last = None  # BUGFIX: loop variable was unbound when the stream yielded nothing
        async for last in self.achat_self_llm_financial_first_loan_stream(model_name):
            pass
        if last and 'text' in last:
            result = last["text"]
        return result

    async def achat_self_llm_financial_first_loan_stream(self, model_name):
        """Stream from the worker on port 11001; whole request retried once
        on failure (already-yielded chunks may repeat)."""
        logger.info("the llm prompt is :{}".format(self.prompt))
        payload = self._worker_payload(model_name)
        url = "http://" + llm_host + ":11001" + "/worker_generate_stream"
        try:
            async for data in self._aworker_stream(url, payload):
                yield data
        except Exception as ee:
            logger.error("llm stream error: {}".format(ee))
            async for data in self._aworker_stream(url, payload):
                yield data

    async def achat_self_llm_stream(self, model_name="planner_llama-2_13b_new"):
        """Stream from the worker on port 21001; falls back to the module
        default model on failure."""
        payload = self._worker_payload(model_name)
        url = "http://" + llm_host + ":21001" + "/worker_generate_stream"
        try:
            async for data in self._aworker_stream(url, payload):
                yield data
        except Exception as ee:
            logger.error("llm stream error: {}".format(ee))
            # Retry the same endpoint but with the module-level default model.
            payload = self._worker_payload(default_llm)
            async for data in self._aworker_stream(url, payload):
                yield data

    async def achat_auto_llm(self, is_stream=False, type="auto"):
        """Dispatch to a backend selected by *type*.

        Args:
            is_stream: when True an async generator is returned (not awaited);
                otherwise the completed text is returned.
            type: "auto" (resolve to module default), "self", "gpt", one of
                the known model aliases, or any other worker model name.
                NOTE(review): the name shadows the builtin `type` but is kept
                for caller compatibility.
        """
        if type == "auto":
            type = default_llm
        if type == "self" and is_stream:
            return self.achat_self_llm_stream()
        elif type == "self" and not is_stream:
            return await self.achat_self_llm()
        elif type == "gpt" and is_stream:
            return self.achat_with_proxy_stream_gpt4()
        elif type == "gpt" and not is_stream:
            return await self.achat_with_proxy_gpt4()
        elif type == 'llm_test':
            return await self.achat_self_llm_test(type)
        elif type == 'financial_first_loan':
            return await self.achat_self_llm_financial_first_loan(type)
        elif type == 'financial_loan_7b_b':
            return await self.achat_self_llm_financial_loan_7b_b(type)
        elif is_stream:
            return self.achat_self_llm_stream(type)
        else:
            return await self.achat_self_llm(type)


if __name__ == "__main__":
    # Manual smoke test: send one tele-sales agent prompt to the test worker.
    import asyncio

    # Sales-agent prompt (Chinese), including the candidate-script catalogue
    # and conversation history; kept verbatim.
    content = """
    请记住，你是一名专业的销售员，你正通过电话和用户沟通，你需要根据对话记录、解决方案，销售技巧和用户情境，在解决方案和销售技巧备选话术中选择或组合最合适的话术回复用户。你主要的任务是回应并解决用户的问题，然后引导用户完成操作并全额借款，实现金条的销售转化。为了完成这个任务，你需要不断讲解活动、优惠券等给用户带来的权益，通过多轮对话激发用户用款需求，引导用户完成借款。
注意以下事项：
1.你的场景是电话销售，表达的内容足够通俗易懂简洁且口语化，随和、高中文化可理解，尽量表现出足够的亲和力和专业性。
2.如果用户当前有问题或疑问，在考虑解决用户问题的同时，也需要注意销售技巧的引导，特别是对话的初始阶段，一定要注意销售技巧的完整。当用户表示在忙、不需要等情况时，不要着急挂机，在不引起用户反感的情况下，继续进行多次劝服用户完成操作借款。
3.根据对话记录和当前的对话阶段做出回应,不要重复用户说过的信息,回复的内容不要有英文，不要有重复的卖点话术，要基有一定的变化，回复话术不要太长。只产生你的话术，不要产生用户的对话，每次只生成一个回复！把开头的"很抱歉"等句式过滤掉，非挂机结束语中不要表达诈骗相关的提醒。
4.当用户连续3次表达在忙、不需要、会考虑等、或者用户有投诉倾向时，先安抚客户再礼貌再见挂机。
5.生成的话术只能在解决方案话术集中选择一句话术或者在销售技巧话术集中选择一句话术，需要保障原有话术的完整性，不要无中生有，也不要过度生成。
6.回复的话术可以是候选集的一句话术，也可以将解决方案话术集中选择一条话术和销售技巧的一条话术进行拼接，但最终回复的话术只能有一个问题，不能同时出现两个问题，这样会使用户困扰。
7.对话的初始阶段(前6轮)，对于有疑问非投诉的用户，在回答用户问题的同时，也一定要使用销售技巧话术的拼接。
8.在"询问有无App"时，先确认用户是否有京东App，没有京东APP再确认是否有京东金融App。
9.需要加用户微信时，先询问是否可以添加微信，用户同意则发送申请并结束再见，用户不同意则不发送申请并结束再见。
10.用户不止一次表示在开车或开会时，要告知用户自己操作然后礼貌挂机，只表达过一次在开车或者开会，可以继续营销。
用户问题：无
销售技巧：表明来意
用户情境：{'用户姓名': '王女士', '用户年龄': '35', '初始用户权益': '已获得优惠券', 'call次': '1-call', '是否已打开APP': '未打开APP'}
销售技巧话术集：
[{'沟通策略': '表明来意', '用户情境': '已获得优惠券,1-call', '话术': '哎您好您好我是您的京东金融客户经理工号95266，来电是想告诉您一个好消息啊就是咱们金条借款服务给您账户发了一张利息折扣券，您最近有打开京东查看使用了吗？', '录音编号': 'SD439-888C', '动作标签': '发券:'}, {'沟通策略': '表明来意', '用户情境': '已获得优惠券', '话术': '您好，我是京东金融的客户经理哈，工号95266，来电是为了通知到您，咱们金条借款服务为了回馈老客户，特意给你账户发了一张利息打折券，这次打折活动也是只特邀了极少部分的优质用户参与的，想问一下您这边最近有没有查看过您的利息打折券呢？', '录音编号': 'SD439-11C', '动作标签': '发券:'}, {'沟通策略': '表明来意', '用户情境': '已获得优惠券', '话术': '我这边是京东金融的，工号95266，是金条服务给到您一张利息打折券，现在使用的话可以节省不少的利息，而且还有利于额度费率的再次调整，具体以系统审批为准，那您这边可以先打开京东app看一下这个活动好吗？', '录音编号': 'SD441-11C'}, {'沟通策略': '再次表明身份来意', '用户情境': '已获得优惠券', '话术': '这次来电主要给到您一张利息折扣券，这次可以搭配利息折扣券全额使用，这样的话综合成本也不高，而且系统还知道了咱们是有大额用款需求的，以后有更合适的利息和额度也有机会给到您，这次您先用一笔好吧？', '录音编号': 'SD447-888C', '动作标签': '发券:'}, {'沟通策略': '加微意向确认', '话术': '要不您看这样吧，我用官方企业微信添加一下您，后续针对金融方面的问题可以直接问我，有什么优惠也会优先推送给您，我给您发送好友申请，您通过下可以吗?', '录音编号': 'SD500-11C'}, {'沟通策略': '加微意向确认', '话术': '好的，您现在操作一下借款，电话邀约期间借款，系统会识别到您是高意向客户，后续有合适的活动也会通知给咱们，另外我这边加您个微信，后续有提额降息的活动通过微信推送给您，您可以随时关注的好嘛？', '录音编号': 'LS033-1C', '动作标签': '同意加微信小码'}, {'沟通策略': '带操作-告知操作简单', '话术': '那您先忙，稍后您不忙了自己先仔细看一下，这个活动确实比较难得，您可以先把这笔钱借出来，生意周转日常消费都能做应急使用的，提交借款后可能会有工作人员联系您核实是否是本人借款，注意接听下就行，您稍后别忘了操作好吗？', '录音编号': 'SD203-11C'}, {'沟通策略': '告诉用户操作简单', '话术': '其实使用额度很简单，就1分钟就能操作完，特别简单，我带您看一下好吧？', '录音编号': 'SD64-11C'}]
会话历史：
销售员:喂您好，请问您是王女士吗？
用户:是的
基于销售员需要沟通的话术内容，选择其的下一步话术的录音编号是（多个录音编号拼接用,分割）：
    """

    # Strip the source-level indentation from the literal (removes every run
    # of three spaces).
    content = content.replace("   ", "")
    for i in range(1):
        chat = LLMAgent(content)
        logger.info("start")
        start_time = time.time()
        # result = asyncio.run(chat.achat_auto_llm(type="gpt"))
        # result = asyncio.run(chat.achat_auto_llm(type="llm_test"))
        result = asyncio.run(chat.achat_auto_llm(type="llm_test"))
        logger.info("this spend is :{}".format(time.time() - start_time))
        # result = asyncio.run(chat.async_chat_with_openai())
        # for data in result:
        #     logger.info(data)
        logger.info(result)
