"""
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    created by lane.chang on '21/08/2023'
    comment: 人工智能聊天
"""
import ast
import datetime
import re
from typing import Optional, Union

import ujson
from laner.pydantic import BaseModel
from openai import AsyncOpenAI
from openai import OpenAI
from pydantic import Field

from config import Config
from project.core.function import match_texts
from project.lib.decorator import func_log, retry


class GptResult(BaseModel):
    """Standard result envelope returned by the ChatGPT helper methods.

    ``code`` is 0 on success and 100 on failure (see understand_match_smart);
    ``message`` carries 'success' or the error text; ``data`` holds the payload.
    """
    code: int = Field(0, title='返回编号')  # 0 = success, 100 = failure
    message: str = Field('success', title='')  # status / error description
    # Widened to include `list`: extract_animal_keys assigns list payloads,
    # which the previous Optional[Union[str, dict]] annotation did not admit.
    data: Optional[Union[str, dict, list]] = Field(None, title='返回数据')


class People(BaseModel):
    """Minimal person profile used to build the courtesy-title prompt."""
    name: str = Field('', title='姓名')  # full name
    age: int = Field(0, title='年龄')  # age in years; presumably 0 = unspecified — confirm with callers
    gender: str = Field('', title='性别', description='男/女')  # expected values: '男' or '女'


class ChatGPT:
    """Thin wrapper around OpenAI-compatible chat-completion clients.

    Two platforms are supported, both configured in ``Config``:
    ``'chat_any_where'`` (Config.OPENAI) and ``'moonshot'`` (Config.MOONSHOT).
    Provides sync (:meth:`llm`) and async (:meth:`llm_async`) completions plus
    prompt-based helpers that parse structured answers out of the model output.
    """

    def __init__(self, platform='chat_any_where', model='moonshot-v1-32k', stop=None, stream=False, temperature=None):
        """
        :param platform: 'chat_any_where' or 'moonshot'
        :param model: unused — the model name always comes from Config;
                      kept only so existing callers' signatures keep working
        :param stop: stop sequence(s) forwarded to the API when set
        :param stream: whether to request a streamed response
        :param temperature: sampling temperature; None keeps the API default
        :raises ValueError: if ``platform`` is not a supported name
        """
        if platform == 'chat_any_where':
            conf = Config.OPENAI
        elif platform == 'moonshot':
            conf = Config.MOONSHOT
        else:
            # Fail fast: previously an unknown platform left self.model and
            # self.client undefined, surfacing only as a late AttributeError.
            raise ValueError(f'unsupported platform: {platform!r}')

        self.model = conf['model']
        self.client = OpenAI(api_key=conf['api_key'], base_url=conf['base_url'])
        self.client_async = AsyncOpenAI(api_key=conf['api_key'], base_url=conf['base_url'])

        self.stop = stop
        self.stream = stream
        self.temperature = temperature

    def _chat_params(self, system_text='', user_text=''):
        """Build the request kwargs shared by llm() and llm_async().

        Empty prompts are skipped so the messages list only contains
        the roles that were actually supplied.
        """
        messages = []
        if system_text:
            messages.append({'role': 'system', 'content': system_text})
        if user_text:
            messages.append({'role': 'user', 'content': user_text})
        params = {'model': self.model, 'messages': messages, 'stream': self.stream}
        if self.stop:
            params['stop'] = self.stop
        if self.temperature is not None:
            params['temperature'] = self.temperature
        return params

    def llm(self,
            system_text='',
            user_text=''):
        """Synchronous chat completion.

        :param system_text: optional system prompt (omitted when empty)
        :param user_text: optional user prompt (omitted when empty)
        :return: the message content of the first completion choice
        """
        # NOTE(review): with stream=True the response is an iterator, so the
        # .choices[0] access below would fail — confirm streaming is unused.
        ret = self.client.chat.completions.create(**self._chat_params(system_text, user_text))

        return ret.choices[0].message.content

    async def llm_async(self,
                        system_text='',
                        user_text=''):
        """Asynchronous chat completion (mirror of :meth:`llm`).

        :param system_text: optional system prompt (omitted when empty)
        :param user_text: optional user prompt (omitted when empty)
        :return: the message content of the first completion choice
        """
        ret = await self.client_async.chat.completions.create(**self._chat_params(system_text, user_text))

        return ret.choices[0].message.content

    @retry(count=3)
    async def json_loads_content(self,
                                 system_text='',
                                 user_text='',
                                 tags=None,
                                 ):
        """Run the async completion and extract tagged fields from the reply.

        :param system_text: system prompt
        :param user_text: user prompt
        :param tags: tag names forwarded to match_texts(); None/empty now
                     means "no tags" instead of a TypeError on the star-unpack
        :return: dict produced by match_texts(), with the raw model reply
                 preserved under the 'gpt_logic' key
        """
        content = await self.llm_async(system_text=system_text, user_text=user_text)
        # tags defaults to None; the old code crashed on `*None`.
        res = match_texts(content, *(tags or ()))
        res['gpt_logic'] = content  # keep the raw reasoning for callers
        return res

    def get_embedding(self, text, model: str = "text-embedding-ada-002"):
        """Return the embedding vector for *text* from the embeddings API.

        :param text: input text; newlines are flattened to spaces first
        :param model: embedding model name
        :return: the embedding (list of floats) of the single input
        """
        text = text.replace("\n", " ")
        return self.client.embeddings.create(input=[text], model=model).data[0].embedding

    @func_log(title='智能理解推理')
    def understand_match_smart(self, question: str, text: str, match_contents: Optional[list[str]] = None, iter_time=3) -> GptResult:
        """Match a free-form customer reply to one of the given answer options.

        :param question: the question that was asked
        :param text: the customer's reply
        :param match_contents: candidate answer options to match against
        :param iter_time: remaining retry attempts on unexpected failure
        :return: GptResult whose data is {'reasoning': ..., 'answer': ...};
                 data stays None when the reply cannot be parsed at all
        """
        result = GptResult()
        now = datetime.datetime.now()
        prompt = (
            '### Instruction:\n'
            "任务描述：你是一名服务工作者，你正在与客户进行对话，对话如下：\n"
            "你问客户：" + question + "\n"
            "客户回答：" + text + "'\n"
            "请问客户的<回答>与<选项列表>中的哪个<选项>表达的是近似的意思？"
            "如果客户的<回答>与<选项列表>中的某个<选项>直接对应，那么直接返回该<选项>即可；如果没有直接相等的，请进行推理后得出答案，但不用输出推理过程。\n"
            "如果客户的<回答>推理不出<选项列表>中的任何<选项>，则返回的answer为\'\'，即空值。\n"
            "请注意，只能选择一个选项。\n"
            "当前时间为（24小时制）：" + str(now) + "\n"
            "请 step-by-step 进行判断。\n"
            "<选项列表>：" + str(match_contents) + "\n"
            "返回格式：返回的内容可直接被python json 加载为dict，具体结构为：{\"reasoning\":\"<你的思考过程>\", \"answer\":\"<选项>\"}\n"
            '### Output:\n'
        )

        # classification
        complete_msg = self.llm(user_text=prompt)
        # post process
        try:
            result.data = ujson.loads(complete_msg)

        except ujson.JSONDecodeError:
            # When the reply is not valid JSON the answer is often embedded as
            # a python-literal dict inside the reasoning text — fish it out.
            pattern = re.compile(r"{'reasoning': '.*', 'answer': '.*'}")
            result_search = pattern.search(complete_msg)
            if result_search:
                # ast.literal_eval replaces eval(): identical for literal
                # dicts, but cannot execute arbitrary model-supplied code.
                result.data = ast.literal_eval(result_search.group())

        except Exception as ex:
            if iter_time > 0:
                # Retry with a decremented budget. Previously the retried
                # result was unconditionally overwritten by the error fields
                # below, discarding a successful retry.
                return self.understand_match_smart(question, text, match_contents, iter_time=iter_time - 1)

            result.code = 100
            result.message = str(ex)

        return result

    def generate_courtesy_title(self, target: People, caller: People) -> GptResult:
        """Generate a polite Chinese form of address for the customer.

        :param target: the person being served (the customer)
        :param caller: the service worker
        :return: GptResult whose data is the courtesy title ('' on parse failure)
        """
        result = GptResult()
        prompt = (
            "任务描述：请根据服务者与客人的基本信息，请思考服务者在遵循中华传统礼仪的基础之上应该如何亲切、礼貌地称呼客人？最后只输出<尊称>，思考过程不用返回。"
            "请注意，女性不冠夫姓。\n"
            "服务者年龄：" + str(caller.age) + "\n"
            "服务者性别：" + caller.gender + "\n"
            "被服务者年龄：" + str(target.age) + "\n"
            "被服务者名称：" + target.name + "\n"
            "被服务者性别：" + target.gender + "\n"
            "返回格式要求：返回的内容可直接被python json 加载为dict，具体结构为：{\"courtesy_title\":\"<尊称>\"}\n，请严格遵守返回格式要求。"
            "返回内容举例：{\"courtesy_title\":\"周叔叔\"}, {\"courtesy_title\":\"周爷爷\"}, {\"courtesy_title\":\"周先生\"}"
            ", {\"courtesy_title\":\"周奶奶\"}, {\"courtesy_title\":\"周小姐\"}, {\"courtesy_title\":\"周阿姨\"}"
        )

        complete_msg = self.llm(user_text=prompt)
        try:
            res_json = ujson.loads(complete_msg)
            result.data = res_json.get('courtesy_title', '')

        except Exception:
            # Best effort: an unparseable reply degrades to an empty title.
            result.data = ''

        return result

    def extract_animal_keys(self, text: str) -> GptResult:
        """Extract animal-name keywords from *text* via the model.

        :param text: the text to scan for animal names
        :return: GptResult whose data is a list of keywords ([] when none
                 are found or the reply cannot be parsed)
        """
        result = GptResult()
        prompt = (
            '### Instruction:\n'
            "请从以下<文本>中，提取与所指定<类型>匹配的关键词，并严格按照<返回格式>返回。"
            "如果<文本>为空，则直接返回[]。\n"
            "如果<文本>中提取不到所指定<类型>匹配的关键词，则返回[]，即无任何关键字信息。\n"
            "请注意: 家禽、动物、神话动物是一类动物的统称，不能作为单独动物的名字使用。\n"
            "<文本>:" + text + "\n"
            "<类型>:" + "动物的名字" + "\n"
            "返回格式：请返回一个匹配结果列表，每捕获到一个匹配的关键词，即存为一个列表的一个元素，返回的内容可直接被python json.load 加载为 list"
            '### Output:\n'
        )

        complete_msg = self.llm(user_text=prompt)
        try:
            result.data = ujson.loads(complete_msg)

        except Exception:
            # Best effort: an unparseable reply degrades to an empty list.
            result.data = []

        return result


if __name__ == '__main__':
    """
    """
    # --- earlier experiments, kept commented out for reference --------------
    # question = "请问你是要睡眠报告还是睡眠咨询？"
    # text = "我现在睡觉不知道打不打呼噜？"
    # match_contents = ['睡眠报告', '睡眠咨询']
    #
    # result = CartGPT.understand_match_smart(question, text, match_contents)
    #
    # print(result)

    # caller = People(name='畅垒', age=20, gender='男')
    # target = People(name='畅翠云', age=25, gender='女')

    # text = """
    #     [{"报告时间":"2024-05-30 12:00:00","平均呼吸率":"17rpm","平均心率":"55bpm","清醒时长":"61min","浅睡时长":"344min","深睡时长":"101min","入睡时间":"2024-05-31 00:39:00","醒来时间":"2024-05-31 09:01:00","离床次数":1,"体动次数评价":"偏多","呼吸暂停或低通气风险评估":"无风险","睡眠质量评分":94},{"报告时间":"2024-05-29 12:00:00","平均呼吸率":"16rpm","平均心率":"56bpm","清醒时长":"122min","浅睡时长":"340min","深睡时长":"62min","入睡时间":"2024-05-29 23:46:00","醒来时间":"2024-05-30 08:29:00","离床次数":2,"体动次数评价":"偏多","呼吸暂停或低通气风险评估":"无风险","睡眠质量评分":83},{"报告时间":"2024-05-28 12:00:00","平均呼吸率":"17rpm","平均心率":"58bpm","清醒时长":"139min","浅睡时长":"353min","深睡时长":"73min","入睡时间":"2024-05-28 22:58:00","醒来时间":"2024-05-29 08:03:00","离床次数":1,"体动次数评价":"偏多","呼吸暂停或低通气风险评估":"无风险","睡眠质量评分":77},{"报告时间":"2024-05-27 12:00:00","平均呼吸率":"17rpm","平均心率":"55bpm","清醒时长":"121min","浅睡时长":"314min","深睡时长":"94min","入睡时间":"2024-05-28 00:39:00","醒来时间":"2024-05-28 08:54:00","离床次数":1,"体动次数评价":"偏多","呼吸暂停或低通气风险评估":"无风险","睡眠质量评分":87},{"报告时间":"2024-05-26 12:00:00","平均呼吸率":"18rpm","平均心率":"62bpm","清醒时长":"207min","浅睡时长":"270min","深睡时长":"81min","入睡时间":"2024-05-27 01:07:00","醒来时间":"2024-05-27 09:07:00","离床次数":1,"体动次数评价":"偏多","呼吸暂停或低通气风险评估":"无风险","睡眠质量评分":79},{"报告时间":"2024-05-25 12:00:00","平均呼吸率":"17rpm","平均心率":"58bpm","清醒时长":"159min","浅睡时长":"391min","深睡时长":"69min","入睡时间":"2024-05-26 01:52:00","醒来时间":"2024-05-26 11:34:00","离床次数":3,"体动次数评价":"偏多","呼吸暂停或低通气风险评估":"无风险","睡眠质量评分":81}]
    # """
    # chat_gpt = ChatGPT()
    # complete_msg = chat_gpt.llm(
    #     system_text='睡眠数据:[{\"报告时间\":\"2024-06-02 12:00:00\",\"平均呼吸率\":\"17rpm\",\"平均心率\":\"55bpm\",\"清醒时长\":\"46min\",\"浅睡时长\":\"292min\",\"深睡时长\":\"138min\",\"入睡时间\":\"2024-06-03 00:55:00\",\"醒来时间\":\"2024-06-03 08:40:00\",\"离床次数\":0,\"体动次数评价\":\"偏多\",\"呼吸暂停或低通气风险评估\":\"无风险\",\"睡眠质量评分\":94},{\"报告时间\":\"2024-06-01 12:00:00\",\"平均呼吸率\":\"16rpm\",\"平均心率\":\"59bpm\",\"清醒时长\":\"134min\",\"浅睡时长\":\"325min\",\"深睡时长\":\"145min\",\"入睡时间\":\"2024-06-02 01:21:00\",\"醒来时间\":\"2024-06-02 11:13:00\",\"离床次数\":1,\"体动次数评价\":\"偏多\",\"呼吸暂停或低通气风险评估\":\"无风险\",\"睡眠质量评分\":82},{\"报告时间\":\"2024-05-30 12:00:00\",\"平均呼吸率\":\"17rpm\",\"平均心率\":\"55bpm\",\"清醒时长\":\"61min\",\"浅睡时长\":\"344min\",\"深睡时长\":\"101min\",\"入睡时间\":\"2024-05-31 00:39:00\",\"醒来时间\":\"2024-05-31 09:01:00\",\"离床次数\":1,\"体动次数评价\":\"偏多\",\"呼吸暂停或低通气风险评估\":\"无风险\",\"睡眠质量评分\":94},{\"报告时间\":\"2024-05-29 12:00:00\",\"平均呼吸率\":\"16rpm\",\"平均心率\":\"56bpm\",\"清醒时长\":\"122min\",\"浅睡时长\":\"340min\",\"深睡时长\":\"62min\",\"入睡时间\":\"2024-05-29 23:46:00\",\"醒来时间\":\"2024-05-30 08:29:00\",\"离床次数\":2,\"体动次数评价\":\"偏多\",\"呼吸暂停或低通气风险评估\":\"无风险\",\"睡眠质量评分\":83},{\"报告时间\":\"2024-05-28 12:00:00\",\"平均呼吸率\":\"17rpm\",\"平均心率\":\"58bpm\",\"清醒时长\":\"139min\",\"浅睡时长\":\"353min\",\"深睡时长\":\"73min\",\"入睡时间\":\"2024-05-28 22:58:00\",\"醒来时间\":\"2024-05-29 08:03:00\",\"离床次数\":1,\"体动次数评价\":\"偏多\",\"呼吸暂停或低通气风险评估\":\"无风险\",\"睡眠质量评分\":77}];这是我近段时间的睡眠数据;每个对象代表一天;接下来请你扮演一个睡眠助手;我会咨询一些关于睡眠的问题;问题与睡眠不相关则婉拒;每次回答都需要以医生口吻对我进行回复;对我的数据进行分析，我的问题如果与睡眠数据有关，则在回答的内容需要结合我的数据进行回复;用户在回答问题时提供任意信息，需要以一个情绪安抚大师的角色，给予客户暖心的安慰回复或者询问情况;最近的睡眠数据可以与之前一段时间的数据进行比较，有相关问题时，给出差异点和进行问询.',
    #     user_text='我昨天和今天的睡眠数据有什么区别'
    # )

    # Live smoke test: asks the default platform to emit a 9x9 multiplication
    # table (performs a real network call when this module is run directly).
    prompt = f"""
            用python程序输出一个九九乘法口诀表
            """
    chat_gpt = ChatGPT()
    result = chat_gpt.llm(user_text=prompt)
    print(result)

    # A high-quality prompt should include the following elements: