# -*- coding:utf-8 -*-
import time

# @Time    : 2023/5/10 14:30
# @Author  : zengwenjia
# @Email   : zengwenjia@lingxi.ai
# @Software: LLM_internal

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import traceback

import requests
import json

from agent.llm_agent_parent import LLMAgentParent
from bot.chatgpt.chat_gpt_bot import ChatGPTBot
from db.redis.redis_service import RedisMemory
import config
from bot.insurance_planner_gpt.context_local_util import local_data, context_data
from common.log import logger
import openai
import aiohttp
import asyncio
import itertools
import random
import uuid
from util_tool import utils
import os
import datetime
import threading
import settings
import socket

if settings.ONLINE:
    llm_host = "116.198.33.15"
    default_llm = "self"
    # Detect the local outbound IP with a UDP "connect" (no packet is sent
    # for UDP connect) and switch to the internal LLM host on that machine.
    # NOTE(review): hosts/IPs are hardcoded — consider moving to settings.
    ip = ""
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        ip = s.getsockname()[0]
    except OSError:
        # Fixed: previously a failed probe crashed the whole module at import
        # time (and a failed socket() left `s` unbound in the finally clause).
        # The probe is best-effort — fall back to the public host instead.
        logger.exception("local ip probe failed, keeping default llm host")
    finally:
        s.close()
    if '172.30.0.72' == ip:
        llm_host = '172.30.0.36'
else:
    llm_host = "116.196.98.248"
    # llm_host = "116.198.33.15"
    default_llm = "self"
config.load_config()
redis_service = RedisMemory()

lock = threading.Lock()
# Keep a module-level event loop for fire-and-forget run_in_executor calls.
# NOTE(review): get_event_loop() outside a running loop is deprecated since
# Python 3.10 — confirm the interpreter version before relying on this.
try:
    loop = asyncio.get_event_loop()
except RuntimeError:
    asyncio.set_event_loop(asyncio.new_event_loop())
    loop = asyncio.get_event_loop()

# SECURITY NOTE(review): live-looking API keys are hardcoded below. Anything
# committed to version control must be treated as leaked — load these from
# environment variables or a secrets manager and rotate the committed keys.
keys = [
    #     ("https://api.xiaojuan.tech", "sk-NpvHbplgoXOg5E7ySJ0Xhx4osqEK36Bi7nJDjgjp4CMxN4dV"),
    #     ("https://api.xiaojuan.tech", "sk-gS3VhVL2C8y87Rw57QrJMnhBZv6Nj6eczu9BbPohF7dr4Rit"),
    # ("https://api.chatlingxi.com", "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"),
    # ("https://api.chatanywhere.com.cn/", "sk-KwptSOiuH2OwAnuOSsQrJm1cLT4UpveAjMQ1r1vdniEI0YZM"),
    # ("https://api.chatanywhere.com.cn/", "sk-go9yMDg0Uann96DumM0hMZ8SG8TAhgbM2ycXdMkMmq6X1w3p"),
    # ("https://api.chatanywhere.com.cn/", "sk-7hZqMOCKNCMcxaEFPjYY7ZMUZlEK4c5NLrkIWTOQX4Bn7Igk"),
    # ("https://api.chatanywhere.com.cn/", "sk-zrQNw6ywH0daXrDUNaSvwiNHdrdny4px0dp0XHaQQ59lJo6T"),
    # ("https://api.chatanywhere.com.cn/", "sk-jczJxePDgr8TAgtNo36B6m86rykMS888ObJoOgmD4s7hlIsH"),

    ("https://api.chatanywhere.com.cn/", "sk-cbxdqhRyCr6FUw1u8HZVQ1Dl5YMSXHsGLcJS8W1JzpcQGbHo"),
    ("https://api.chatanywhere.com.cn/", "sk-9B1kVYyf2tDLKxu0WFrM6n5z8Xm6zb8tyeKsz3Te4KswV7P7"),
    ("https://api.chatanywhere.com.cn/", "sk-nBRh1AqksxmVY7pMIHGZ6W5XGvkJk6KhXXuExXk3ZkQrin1z"),
    ("https://api.chatanywhere.com.cn/", "sk-XxsgcPX3ALAmpylCZWvalaDkYOWXNQ1uenrbDfwlgudNlxip"),
    ("https://api.chatanywhere.com.cn/", "sk-rgJjmdcnuSBaXMvegtNQI1MbU9Isme4LvWRl0PGiFDEl8sD4"),
    ("https://api.chatlingxi.com", "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"),
    # ("https://api.xiaojuan.tech/", "sk-ECrED6wYGs2hlKTfTdkDFtOg9SNiNJH2WxJocd76KnZzcuR0"),
    # ("https://api.chatanywhere.cn", "sk-dGvvLJ7c5V5VAF9v1rpKDchcIDJt7EESJJVQOK1ME64iMrbd"),
    # ("https://api.chatanywhere.cn", "sk-vQFlBZleLmrYfPYL4B8ENEwgWu5U6KXVbpF9mgYnx3kmWjF5"),
    # ("https://api.chatanywhere.cn", "sk-HKwwBw9CIwifhRBIhEeRiyoK9ylLPVZF3xoAEnbVGIzljt0x"),
]

# (endpoint, deployment-name, api-key) triples for Azure OpenAI.
azure_keys = [
    # ("https://lx-gpt-australia-east.openai.azure.com/", "lx-gpt-4-ae", "f31f66c8601d488eac461ade81c98da1"),
    # ("https://lx-gpt-canada-east.openai.azure.com/", "lx-gpt-4-ce", "6349021ec7fa47a7b526e92bf638ccf3"),
    # ("https://lx-gpt-east-us2.openai.azure.com/", "lx-gpt-4-eu2", "aefe46a980ba4a5bb24845f5618b05ce"),
    # ("https://lx-gpt-france-central.openai.azure.com/", "lx-gpt-4-fc", "21b9bf1adb5b48c2a6d177a02e0c0816"),
    # ("https://lx-gpt-norway-east.openai.azure.com/", "lx-gpt-4-ne", "ebbb7c0d824f437394f3ccdf56bdaf64"),
    ("https://lx-gpt-south-india.openai.azure.com/", "lx-gpt-4-sl", "42a1c23922394c559a335e9786e5b358"),
    # ("https://lx-gpt-sweden-central.openai.azure.com/", "lx-gpt-4-sc", "7b175824ea6c42e8a07c8456704bb791"),
    # ("https://lx-gpt-uk-south.openai.azure.com/", "lx-gpt-4-us", "b896d019f5964300a93b821351ac9017"),
    ("https://lx-gpt-west-u.openai.azure.com/", "lx-gpt-4-wu", "340e512ddc7848bf814570dccd8ec149"),
]

# Dedicated fallback pool, separate from the rotating `keys` pool.
backup_keys = [("https://api.chatlingxi.com", "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")]

# Randomize pool order once at import time.
random.shuffle(azure_keys)
random.shuffle(keys)

# Round-robin iterators over the pools.
data_cycle = itertools.cycle(keys)
# Fixed: this previously cycled over `keys` again, so the dedicated
# `backup_keys` pool above was never used anywhere.
backup_data_cycle = itertools.cycle(backup_keys)
azure_data_cycle = itertools.cycle(azure_keys)


# Runtime switch for the module-wide default backend.
def change_default_llm(llm_model):
    """Set the module-level ``default_llm`` used when callers pass
    ``type="auto"``.

    :param llm_model: backend identifier, e.g. "self", "gpt" or "zephyr".
    """
    global default_llm
    default_llm = llm_model


def get_access_token():
    """Exchange the Baidu AIP API key/secret for an OAuth access token.

    POSTs to the Baidu OAuth endpoint (credentials are embedded in the URL)
    and returns the ``access_token`` field of the JSON response, or ``None``
    if the field is absent.
    """
    url = "https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=X8TlvTouC1rUTiMNxaaOlgoG&client_secret=7fA2Mzo1Lc5orBoK2oKglkmRu9RSAhe5"
    response = requests.request(
        "POST",
        url,
        headers={
            'Content-Type': 'application/json',
            'Accept': 'application/json',
        },
        data=json.dumps(""),
    )
    return response.json().get("access_token")


ernie_token = get_access_token()


class LLMAgent(LLMAgentParent):
    """Agent that routes one prompt to GPT-4 (proxy or Azure), a self-hosted
    FastChat worker, or a zephyr endpoint, in both blocking and streaming
    forms."""

    def __init__(self, prompt, gpt_prompt_content=None):
        """Prepare prompt payloads for every backend.

        :param prompt: plain prompt text (also used for the local model).
        :param gpt_prompt_content: optional GPT-specific prompt text; falls
            back to ``prompt`` when omitted.
        """
        super().__init__(prompt, gpt_prompt_content)
        # The GPT chat API wants a message list; the local worker takes raw text.
        self.gpt_prompt_content = prompt if gpt_prompt_content is None else gpt_prompt_content
        self.gpt_prompt = [{"role": "user", "content": self.gpt_prompt_content}]
        self.model_prompt = (
            "你是一个专业的助手，你需要基于用户的问题，给出准确的答案。USER: "
            + self.prompt
            + " ASSISTANT:"
        )

    async def achat_with_proxy_gpt4(self, retry_count=0, save_data=True, save_path="llm"):
        """Call GPT-4 through the rotating proxy-key pool and return the reply.

        Retries once with the next key on any failure; returns "" when both
        attempts fail.

        :param retry_count: internal recursion counter; callers keep the default.
        :param save_data: when True, persist prompt/result as training data.
        :param save_path: sub-path used when saving training data.
        :return: the assistant message content, or "" on failure.
        """
        session_id = ""
        message_id = ""
        try:
            # Request-scoped metadata is best-effort; missing context is fine.
            message = context_data.get()
            if message:
                session_id = message.get("session_id")
                message_id = message.get("message_id")
        except Exception:
            pass
        result = ""
        key_tuple = next(data_cycle)
        key = key_tuple[1]
        content = ""
        # Removed a no-op retry branch that re-assigned the same model name.
        model_type = "gpt-4-1106-preview"
        try:
            headers = {
                "Content-Type": "application/json",
                "Authorization": "Bearer " + key
            }
            payload = {
                "model": model_type,
                "messages": self.gpt_prompt,
                "temperature": 0.9,
                "max_tokens": 1024,
            }
            async with aiohttp.ClientSession() as session:
                # Use the response as a context manager so the connection is
                # always released.
                async with session.post(key_tuple[0] + "/v1/chat/completions",
                                        headers=headers, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=100)) as response:
                    content = await response.content.read()
            result = json.loads(content.decode())["choices"][0]['message']['content']

            if save_data:
                # Fire-and-forget: persist training data off the event loop.
                loop.run_in_executor(None, self.save_train_data, self.prompt, result,
                                     self.__class__.__name__, save_path, "gpt-4",
                                     session_id + ":" + message_id)

        except Exception as ee:
            logger.error("the key is: {}".format(key))
            if content:
                logger.error("receive message is:" + content.decode())
            logger.exception("gpt proxy error: {}".format(ee))
            if retry_count < 1:
                # Fixed: forward save_data/save_path so the retry honors the
                # caller's settings (previously reset to their defaults).
                return await self.achat_with_proxy_gpt4(retry_count + 1,
                                                        save_data=save_data,
                                                        save_path=save_path)

        logger.info(
            f"当前会话:{session_id},当前任务：{self.__class__.__name__}\nthe gpt prompt is: {self.gpt_prompt[0].get('content')}\n{result}")
        return result

    async def achat_with_azure_gpt4(self, retry_count=0, save_data=True, save_path="llm"):
        """Call GPT-4 through the rotating Azure OpenAI deployments.

        Retries once with the next deployment on any failure; returns "" when
        both attempts fail.

        :param retry_count: internal recursion counter; callers keep the default.
        :param save_data: when True, persist prompt/result as training data.
        :param save_path: sub-path used when saving training data.
        :return: the assistant message content, or "" on failure.
        """
        session_id = ""
        message_id = ""
        try:
            # Request-scoped metadata is best-effort; missing context is fine.
            message = context_data.get()
            if message:
                session_id = message.get("session_id")
                message_id = message.get("message_id")
        except Exception:
            pass
        result = ""
        key_tuple = next(azure_data_cycle)
        url, model, key = key_tuple
        content = ""
        try:
            headers = {
                "Content-Type": "application/json",
                "api-key": key
            }
            payload = {
                "messages": self.gpt_prompt,
                "temperature": 0.9,
                "max_tokens": 1024,
                # Azure routes by the deployment in the URL; "model" here is
                # informational only.
                "model": "gpt-4-1106-preview",
            }
            # NOTE(review): no explicit timeout — aiohttp's default applies;
            # confirm that is intended for this endpoint.
            async with aiohttp.ClientSession() as session:
                async with session.post(
                        url + "openai/deployments/" + model + "/chat/completions?api-version=2023-05-15",
                        headers=headers, json=payload) as response:
                    content = await response.content.read()
            result = json.loads(content.decode())["choices"][0]['message']['content']

            if save_data:
                # Fire-and-forget: persist training data off the event loop.
                loop.run_in_executor(None, self.save_train_data, self.prompt, result,
                                     self.__class__.__name__, save_path, "azure-gpt-4",
                                     session_id + ":" + message_id)

        except Exception as ee:
            logger.error("the azure key is: {}".format(key))
            if content:
                logger.error("receive message is:" + content.decode())
            logger.exception("azure gpt error: {}".format(ee))
            if retry_count < 1:
                # Fixed: forward save_data/save_path so the retry honors the
                # caller's settings (previously reset to their defaults).
                return await self.achat_with_azure_gpt4(retry_count + 1,
                                                        save_data=save_data,
                                                        save_path=save_path)
        logger.info(
            f"当前会话:{session_id},当前任务：{self.__class__.__name__} \n the gpt prompt is: {self.gpt_prompt[0].get('content')}\n{result}")
        return result

    async def achat_with_azure_stream_gpt4(self, save_data=True, save_path="llm"):
        """Stream GPT-4 chat chunks from a rotating Azure OpenAI deployment.

        Async generator yielding dicts parsed from the SSE stream (see
        ``parse_gpt_stream_response``). On any failure it rotates to the next
        deployment and retries the whole request once — a mid-stream error
        therefore restarts yielding from the beginning.

        NOTE(review): ``save_data``/``save_path`` are accepted but unused in
        this method — confirm whether saving was intended here.
        """
        session_id = ""
        message_id = ""
        try:
            # Request-scoped metadata is best-effort; missing context is fine.
            message = context_data.get()
            if message:
                session_id = message.get("session_id")
                message_id = message.get("message_id")
        except Exception:
            pass

        # Rotate through the (endpoint, deployment, api-key) pool.
        key_tuple = next(azure_data_cycle)
        url = key_tuple[0]
        model = key_tuple[1]
        key = key_tuple[2]
        headers = {
            "Content-Type": "application/json",
            "api-key": key
        }
        payload = {
            "messages": self.gpt_prompt,
            "stream": True,
            "model": "gpt-4-1106-preview",
            "temperature": 0.9,
            "max_tokens": 2048,
        }
        try:
            # Cache the prompt for this turn (ex="3600" — presumably seconds;
            # note the expiry is passed as a string, verify RedisMemory.set).
            redis_service.set(f"planner:Conversation:prompt:{session_id}:{message_id}", self.prompt, ex="3600")
            async with aiohttp.ClientSession() as session:
                async with session.post(
                        url + "openai/deployments/" + model + "/chat/completions?api-version=2023-05-15",
                        headers=headers, json=payload,
                        timeout=aiohttp.ClientTimeout(total=500)) as response:
                    async for line in response.content:
                        result = self.parse_gpt_stream_response(line)
                        if result:
                            yield result
        except Exception as ee:
            # Log the failing key before drawing the next one.
            logger.error("the azure key is: {}".format(key))
            logger.exception("azure gpt4 stream error: {}".format(ee))
            try:
                key_tuple = next(azure_data_cycle)
                url = key_tuple[0]
                model = key_tuple[1]
                key = key_tuple[2]
                headers = {
                    "Content-Type": "application/json",
                    "api-key": key
                }
                # NOTE(review): unlike the first attempt, this retry payload
                # omits the "model" key — Azure routes by the deployment in
                # the URL, but confirm the drift is intentional.
                payload = {
                    "messages": self.gpt_prompt,
                    "stream": True,
                    "temperature": 0.9,
                    "max_tokens": 2048,
                }
                async with aiohttp.ClientSession() as session:
                    async with session.post(
                            url + "openai/deployments/" + model + "/chat/completions?api-version=2023-05-15",
                            headers=headers, json=payload,
                            timeout=aiohttp.ClientTimeout(total=500)) as response:
                        async for line in response.content:
                            result = self.parse_gpt_stream_response(line)
                            if result:
                                yield result
            except Exception as ee:
                # Both attempts failed; the generator simply ends here.
                logger.error("the azure key is: {}".format(key))
                logger.exception("azure gpt4 stream again error: {}".format(ee))

    def parse_gpt_stream_response(self, response):
        """Parse one raw SSE line from a GPT streaming response.

        :param response: raw bytes line, possibly prefixed with ``data: ``.
        :return: the parsed dict, a generator over the elements when the JSON
            is a list, or ``None`` for blank lines and the ``[DONE]`` marker.
        :raises Exception: when the JSON is neither a list nor a dict.
        """
        if response.strip() == b"data: [DONE]":
            return None
        if not response or not response.strip():
            return None
        if response.startswith(b"data: "):
            response = response[len(b"data: "):]
        decoded = response.decode("utf-8")
        parsed = json.loads(decoded)
        if isinstance(parsed, list):
            return (element for element in parsed)
        if isinstance(parsed, dict):
            return parsed
        raise Exception("gpt stream error: {}".format(decoded))

    async def achat_with_proxy_stream_gpt4(self):
        """Stream GPT-4 chat chunks through the rotating proxy-key pool.

        Async generator yielding dicts parsed from the SSE stream (see
        ``parse_gpt_stream_response``). On any failure the next key is drawn
        and the whole request retried once — a mid-stream error therefore
        restarts yielding from the beginning.
        """
        session_id = ""
        try:
            # Request-scoped metadata is best-effort; missing context is fine.
            message = context_data.get()
            if message:
                session_id = message.get("session_id")
        except Exception:
            pass
        logger.info(
            f"当前会话:{session_id},当前任务：{self.__class__.__name__} \n the gpt stream prompt is: {self.gpt_prompt[0].get('content')}")
        # Rotate through the (base_url, api_key) pool.
        key_tuple = next(data_cycle)
        key = key_tuple[1]
        headers = {
            "Content-Type": "application/json",
            "Authorization": "Bearer " + key
        }
        payload = {
            "model": "gpt-4-1106-preview",
            "messages": self.gpt_prompt,
            "stream": True,
            "temperature": 0.9,
            "max_tokens": 2048,
        }
        try:
            # NOTE(review): `content` is assigned but never used here.
            content = ""

            async with aiohttp.ClientSession() as session:
                async with session.post(key_tuple[0] + "/v1/chat/completions",
                                        headers=headers, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=300)) as response:
                    async for line in response.content:
                        result = self.parse_gpt_stream_response(line)
                        if result:
                            yield result



        except Exception as ee:
            # Log the failing key before drawing the next one.
            logger.error("the key is: {}".format(key))
            logger.exception("gpt4 stream error: {}".format(ee))
            try:
                key_tuple = next(data_cycle)
                key = key_tuple[1]
                headers = {
                    "Content-Type": "application/json",
                    "Authorization": "Bearer " + key
                }
                payload = {
                    "model": "gpt-4-1106-preview",
                    "messages": self.gpt_prompt,
                    "stream": True,
                    "temperature": 0.9,
                    "max_tokens": 2048,
                }
                # NOTE(review): the retry timeout (100s) is shorter than the
                # first attempt's (300s) — confirm this is intentional.
                async with aiohttp.ClientSession() as session:
                    async with session.post(key_tuple[0] + "/v1/chat/completions",
                                            headers=headers, json=payload,
                                            timeout=aiohttp.ClientTimeout(total=100)) as response:
                        async for line in response.content:
                            result = self.parse_gpt_stream_response(line)
                            if result:
                                yield result
            except Exception as ee:
                # Both attempts failed; the generator simply ends here.
                logger.error("the key is: {}".format(key))
                logger.exception("gpt-4-1106-preview stream again error: {}".format(ee))

    async def achat_self_llm(self, model_name="llm_xiaozhu", save_data=True, save_path="llm"):
        """Run the self-hosted LLM to completion and return the final text.

        Drains ``achat_self_llm_stream`` and returns the ``text`` field of the
        last yielded chunk, or "" when the stream produced nothing usable.

        :param model_name: worker-side model identifier, forwarded to the stream.
        :param save_data: accepted but unused here — NOTE(review): confirm
            whether saving was intended (the GPT variants do save).
        :param save_path: accepted but unused here (see above).
        :return: the final generated text, or "" on an empty stream.
        """
        result = ""
        session_id = ""
        message_id = ""
        try:
            # Request-scoped metadata is best-effort; missing context is fine.
            message = context_data.get()
            if message:
                session_id = message.get("session_id")
                message_id = message.get("message_id")
        except Exception:
            pass

        # Fixed: initialize before the loop so an empty stream no longer
        # raises NameError on the loop variable.
        last_chunk = None
        async for chunk in self.achat_self_llm_stream(model_name):
            last_chunk = chunk

        if last_chunk and 'text' in last_chunk:
            result = last_chunk["text"]

        logger.info(
            f"当前会话:{session_id},当前任务：{self.__class__.__name__} \n the gpt prompt is: {self.prompt}\n{result}")

        return result

    async def achat_self_llm_stream(self, model_name="llm_xiaozhu"):
        """Stream generation chunks from the self-hosted FastChat worker.

        Async generator yielding dicts decoded from the worker's
        NUL-delimited JSON stream. On any failure it retries once with the
        fallback model name and adjusted sampling parameters.

        :param model_name: worker-side model identifier for the first attempt.
        """
        session_id = ""
        message_id = ""
        try:
            # Request-scoped metadata is best-effort; missing context is fine.
            message = context_data.get()
            if message:
                session_id = message.get("session_id")
                message_id = message.get("message_id")
        except Exception:
            pass
        # NOTE(review): this local assignment shadows the module-level
        # ``default_llm``, so the fallback below always uses "llm_xiaozhu"
        # regardless of change_default_llm() — confirm that is intended.
        default_llm = "llm_xiaozhu"
        # payload = {
        #     "model": "llama3",
        #     "messages": self.gpt_prompt,
        #     "stream": True,
        #     "temperature": 0.5,
        #     "max_tokens": 1024,
        # }

        payload = {
            "model": model_name,
            "prompt": self.model_prompt,
            "temperature": 0.3,
            "max_new_tokens": 1500,
            # 'repetition_penalty': 0.5,
            # 'top_p': 0.9,
            # 'top_k': 1,
            "stop": '</s>',
            "stop_token_ids": None,
            "echo": False,
        }

        try:
            # Cache the prompt for this turn (ex="3600" — presumably seconds;
            # note the expiry is passed as a string, verify RedisMemory.set).
            redis_service.set(f"planner:{self.__class__.__name__}:prompt:{session_id}:{message_id}",
                              self.prompt.strip(), ex="3600")
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + llm_host + ":21002" + "/worker_generate_stream",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=100)) as response:

                    # An empty delimiter is not supported by aiohttp, so we'll read chunk by chunk
                    # and split manually on the worker's b'\0' separators.
                    buffer = b''
                    async for line in response.content.iter_any():
                        buffer += line
                        while b'\0' in buffer:
                            chunk, buffer = buffer.split(b'\0', 1)
                            if chunk:
                                data = json.loads(chunk.decode())
                                yield data
        # try:
        #     redis_service.set(f"planner:{self.__class__.__name__}:prompt:{session_id}:{message_id}",
        #                       self.prompt.strip(), ex="3600")
        #     async with aiohttp.ClientSession() as session:
        #         async with session.post("http://" + llm_host + ":21002" + "/v1/chat/completions",
        #                                 headers={"Content-Type": "application/json"}, json=payload,
        #                                 timeout=aiohttp.ClientTimeout(total=100)) as response:
        #
        #             # An empty delimiter is not supported by aiohttp, so we'll read chunk by chunk
        #             async for line in response.content:
        #                 result = self.parse_gpt_stream_response(line)
        #                 if result:
        #                     yield result
        except Exception as ee:
            logger.error("llm stream error: {}".format(ee))
            # Fallback attempt: pinned model name, larger token budget.
            # NOTE(review): repetition_penalty=0.5 (<1.0) usually *encourages*
            # repetition in HF-style samplers — confirm this value is intended.
            payload = {
                "model": default_llm,
                "prompt": self.model_prompt,
                "temperature": 0.3,
                "max_new_tokens": 2048,
                'repetition_penalty': 0.5,
                # 'top_p': 0.9,
                # 'top_k': 1,
                "stop": '</s>',
                "stop_token_ids": None,
                "echo": False,
            }
            async with aiohttp.ClientSession() as session:
                async with session.post("http://" + llm_host + ":21002" + "/worker_generate_stream",
                                        headers={"User-Agent": "FastChat Client"}, json=payload,
                                        timeout=aiohttp.ClientTimeout(total=100)) as response:

                    # An empty delimiter is not supported by aiohttp, so we'll read chunk by chunk
                    # and split manually on the worker's b'\0' separators.
                    buffer = b''
                    async for line in response.content.iter_any():
                        buffer += line
                        while b'\0' in buffer:
                            chunk, buffer = buffer.split(b'\0', 1)
                            if chunk:
                                data = json.loads(chunk.decode())
                                yield data

    async def achat_zephyr_llm(self, save_data=True, save_path="llm"):
        """Run the zephyr model to completion and return its final text.

        Drains ``achat_zephyr_llm_stream`` and returns the first entry of the
        last chunk's ``text`` list. Returns "" when the stream produced no
        usable chunk (previously this raised NameError/KeyError/IndexError).

        :param save_data: when True, persist prompt/result as training data.
        :param save_path: sub-path used when saving training data.
        :return: the final generated text, or "" on an empty/malformed stream.
        """
        session_id = ""
        message_id = ""
        try:
            # Request-scoped metadata is best-effort; missing context is fine.
            message = context_data.get()
            if message:
                session_id = message.get("session_id")
                message_id = message.get("message_id")
        except Exception:
            pass

        # Fixed: initialize before the loop so an empty stream no longer
        # raises NameError on the loop variable.
        last_chunk = None
        async for chunk in self.achat_zephyr_llm_stream():
            last_chunk = chunk

        # Fixed: guard against an empty or malformed final chunk instead of
        # crashing on the unconditional x['text'][0] access.
        if not last_chunk or 'text' not in last_chunk or len(last_chunk['text']) == 0:
            logger.error(f"当前会话:{session_id},zephyr llm produced no usable output")
            return ""

        if save_data:
            # Fire-and-forget: persist training data off the event loop.
            loop.run_in_executor(None, self.save_train_data, self.prompt, last_chunk['text'][0],
                                 self.__class__.__name__, save_path, "zephyr",
                                 session_id + ":" + message_id)
            logger.info(
                f"当前会话:{session_id},当前任务：{self.__class__.__name__},the zephyr llm prompt is :{self.prompt}\n{last_chunk['text'][0]}")
        return last_chunk['text'][0]

    async def _zephyr_generate_stream(self, payload):
        """POST ``payload`` to the zephyr /generate endpoint and yield the
        NUL-delimited JSON chunks (shared by the primary and retry paths)."""
        async with aiohttp.ClientSession() as session:
            async with session.post("http://" + "socket.chatlingxi.com" + ":8092" + "/generate",
                                    headers={'Content-Type': 'application/json'},
                                    json=payload,
                                    timeout=aiohttp.ClientTimeout(total=100)) as response:
                # aiohttp cannot split on a NUL delimiter, so read raw chunks
                # and split manually on b'\0'.
                buffer = b''
                async for line in response.content.iter_any():
                    buffer += line
                    while b'\0' in buffer:
                        chunk, buffer = buffer.split(b'\0', 1)
                        if chunk:
                            yield json.loads(chunk.decode())

    async def achat_zephyr_llm_stream(self):
        """Stream generation chunks from the zephyr model.

        Async generator yielding dicts decoded from the server's stream. On
        any error the whole request is retried once, so a mid-stream failure
        may re-yield chunks from the start.
        """
        session_id = ""
        message_id = ""
        try:
            # Request-scoped metadata is best-effort; missing context is fine.
            message = context_data.get()
            if message:
                session_id = message.get("session_id")
                message_id = message.get("message_id")
        except Exception:
            pass
        model_prompt = "<|user|>\n{}</s>\n<|assistant|>\n".format(self.prompt.strip())
        payload = {
            "prompt": model_prompt,
            "stream": True,
            "n": 1,
            "best_of": 1,
            "presence_penalty": 0.0,
            "frequency_penalty": 0.2,
            "temperature": 0.3,
            "top_p": 0.95,
            "top_k": 50,
            "use_beam_search": False,
            "stop": ["</s>"],
            "ignore_eos": False,
            "max_tokens": 1024,
            "logprobs": None
        }
        try:
            async for data in self._zephyr_generate_stream(payload):
                yield data
        except Exception:
            # Fixed: the first failure used to be swallowed silently (the
            # caught exception was never used) — log it before the retry.
            logger.exception("zephyr stream error, retrying once")
            async for data in self._zephyr_generate_stream(payload):
                yield data

    async def achat_auto_llm(self, is_stream=False, type="auto"):
        """Dispatch the prompt to the selected backend.

        :param is_stream: when True, return an async generator of chunk dicts;
            otherwise await the backend and return its full reply.
        :param type: "auto" (resolve via the module-level ``default_llm``),
            "self", "gpt", "zephyr", or any other value, which is forwarded
            to the self-hosted backend as a model name.
        """
        backend = default_llm if type == "auto" else type
        if backend == "self":
            return self.achat_self_llm_stream() if is_stream else await self.achat_self_llm()
        if backend == "gpt":
            # GPT requests currently go through Azure deployments.
            return self.achat_with_azure_stream_gpt4() if is_stream else await self.achat_with_azure_gpt4()
        if backend == "zephyr":
            return self.achat_zephyr_llm_stream() if is_stream else await self.achat_zephyr_llm()
        # Unknown value: treat it as a self-hosted model name.
        return self.achat_self_llm_stream(backend) if is_stream else await self.achat_self_llm(backend)


if __name__ == "__main__":
    # Ad-hoc manual smoke test: build a full insurance-planner prompt and run
    # it through the self-hosted backend. Requires the worker/redis/config to
    # be reachable, so this is not an automated test.
    import asyncio

    content = """
请记住，你是保险助理，性格活泼热情，细心周到。当前在企微场景和用户进行沟通。
你的公司是：慧择保险网
业务范围是：仅为用户提供全面的保险规划服务，帮助用户解决任何保险规划或保险产品等相关的问题，以解决用户问题为目标，不以销售产品为导向。
公司的价值观是：真诚、温暖、专业，做用户的真朋友，尊重每个用户个体的差异，用心为用户提供最优质的服务。
你需要结合【用户信息】和【会话历史】，从【方案背景知识】和【下一步沟通方案】里选择内容进行组合，得到回复用户的话术。
需要注意以下事项：
1.可以适当用些表情来使对话的氛围更轻松，但表情别过于单一，以及增加你向用户传递的热情，表达的内容足够通俗易懂且口语化，随和、高中文化可理解，尽量表现出足够的亲和力。
2.每次沟通回复的话术不要每次开头重复用户的信息或内容，也不要在结尾强调自己的专业性。在和用户初始沟通阶段尽可能回复话术的简洁易懂
3.你具备向用户主动推荐保险产品的能力，用户主动要求了解或推荐保险产品时，你需要向用户推荐具体的保险产品，但投资、股票或基金的产品你均不懂，因此不要回答或主动推荐任何投资、股票或基金产品相关的问题
4.你足够的耐心和细心，在讲解解决方案或收集相关信息时，尽量向用户说明原因
5.根据会话历史和当前的会话阶段做出回应,不要每次都重复遍用户说过的信息。只产生保险助理的话术，不要产生用户的对话
6.对于用户自己还没有明确的产品解决方案的，需要逐步和用户确认及明确用户的需求，解决方案需要逐步由粗颗粒度到细颗粒度
7.措辞不要太直接和沉重，例如不要说'死亡赔偿'，而是说'身故赔付'。
8.讲解保险责任时不要拿用户举例，例如不能说'如果您发生意外/身故'，而是要说'如果被保险人发生意外/身故'
9.推荐的产品名称只能在【下一步沟通方案】或者【方案背景知识】中获取，如果这里面不存在的，都不准提及。
10.如果说到用户的信息，一定得保证是在'历史对话中已收集到的用户信息：'中的，其他任何地方都不提供用户信息，也不要自己编造用户信息
11.对于保险相关的知识只能根据【方案背景知识】和【下一步沟通方案】的内容进行回答，不能自己编造任何保险相关的知识
12、【方案背景知识】里的【慧择保险网-公司知识】是相关用户问题下推荐的回复话术，你回答的话术尽可能从这里面获取
13、第一次推荐产品时都带上产品的链接






【历史对话中已收集到的用户信息】：
{'本人年龄': 30, '被保人': '本人'}


【下一步沟通方案】：
====
医疗险->长相安长期医疗险（20年保证续保）（保险产品名称）（保险类型：医疗险，保险产品类型：百万医疗险）->产品简介（需讲解）
====


【方案背景知识】：
保险产品:长相安长期医疗险（20年保证续保）-个人版
产品链接：https://www.huize.com/apps/cps/index/product/detail?prodId=102752&planId=106280
投保方式：线上
产品简介：1.保障范围广，保障额度高一般医疗保额200万/年；特定疾病保额200万/年；重大疾病医疗400万/年；并且在保证续保的20年内最高可以报销800万；
2.免赔额低报销比例高：免赔1万/年，报销比例100%；无理赔每年免赔额递减1000元，最低降至5000元 
3.投保年龄广：0-70岁都可以买
4.保证续保20年，保证续保期间不管产品停售、发生理赔或身体变差也都可以续保；
5.可选责任恶性肿瘤院外购药，保证续保；
6.可选互联网门诊险及少儿门诊保障：保额5000；
7.住院前后门急诊保障好：保障住院前30天，出院后30天的门急诊费用

【慧择保险网-公司知识】:
{'用户问题': '医疗险', '话术': '您想给家人看还是给自己看呢'}
{'用户问题': '医疗险，重疾险', '话术': '您是想帮助自己了解这两个险种呢，还是家里其他人呢？'}
{'用户问题': '给自己的医疗险', '话术': '好的，医疗险对每个人来说，都是非常重要的，这款是慧择目前保障非常全的产品：【产品链接】'}




【会话历史】：
用户:我通过了你的联系人验证请求，现在我们可以开始聊天了
保险助理:欢迎使用慧择保险服务，有任何保险相关的问题都可以联系慧择保险，慧择保险竭诚为您服务！
保险助理:�您好呀～我是您的专属慧择小助理。
您最近有什么想要关注和了解的保险么？
回复【医疗险】【意外险】【重疾险】我给您做详细介绍，帮您分析适配度～
用户:医疗险
保险助理:好的，您是想给自己看呢还是给家里人看，年龄和我说下，我给您推荐适合的
用户:自己，30岁

保险助理:


"""
    # Strip the 3-space runs used to indent the prompt in this source file.
    content = content.replace("   ", "")
    # Loop kept at 1 iteration; bump the range to repeat the experiment.
    for i in range(1):
        chat = LLMAgent(content)
        logger.info("start")
        result = asyncio.run(chat.achat_auto_llm(type="self"))
        # result = asyncio.run(chat.async_chat_with_openai())
        # for data in result:
        #     logger.info(data)
        # print(result)