import random
from http import HTTPStatus

from dashscope import Generation


class AI:
    """Conversational wrapper around DashScope's qwen-turbo model.

    Builds a styled prompt from persona/tone options (or a user-supplied
    template) and supports single-shot and multi-turn chat, each in plain
    and streaming form.
    """

    # Persona injected into the system message, keyed by the numeric ``role``
    # option; unknown values fall back to "helpful assistant".
    _ROLE_NAMES = {
        1: "helpful assistant",
        2: "English teacher",
        3: "Math teacher",
        4: "scientist",
        5: "good friend",
        6: "robot",
        7: "programming teacher",
    }

    # Prompt fragments (Chinese, sent verbatim to the model) keyed by the
    # mode / level / tone options. Missing keys contribute nothing.
    _MODE_HINTS = {
        1: ",回答用户的问题之后再根据用户的回答对用户提一个相关的问题",
        2: ",回答用户的问题之后尽可能地让用户一些联想思考发散的空间",
        3: ",不直接回答用户的问题,但是给予用户一些提示让他自行思考",
    }
    _LEVEL_HINTS = {
        0: ",内容可以带有一定的主观色彩,尽可能地发挥自己的联想",
        1: ",内容尽可能地客观中立",
    }
    _TONE_HINTS = {
        1: ",语气尽量平静一点",
        2: ",语气尽量可爱俏皮一点,必要时可以适当撒娇卖萌",
        3: ",语气尽量活泼一点",
        4: ",语气尽量严肃一点",
        5: ",语气尽量表现得开心一些",
        6: ",语气情绪尽量表现得伤感一点,像是有心事的样子",
        7: ",情绪尽可能激烈高涨一点",
    }

    # Multi-turn history is cleared after this many rounds to bound context size.
    _MAX_ROUNDS = 50

    def __init__(self, role=1, limit_words=30, mode=0, level=1, tone=1, pointer_template=""):
        """Configure the assistant's persona and reply style.

        Args:
            role: Persona the AI plays: 1 helpful assistant (default),
                2 English teacher, 3 math teacher, 4 scientist, 5 good friend,
                6 robot, 7 programming teacher. Unknown values fall back to 1.
            limit_words: Soft cap on reply length, in characters.
            mode: Reply behaviour: 0 answer directly (default); 1 answer then
                ask the user a related follow-up question; 2 answer then leave
                room for the user to think/associate further; 3 do not answer
                directly, only give hints.
            level: 0 replies may be subjective/imaginative; 1 replies should be
                objective and neutral (default).
            tone: 1 calm (default), 2 cute/playful, 3 lively, 4 serious,
                5 happy, 6 sad/melancholic, 7 excited/high-spirited.
            pointer_template: User-defined prompt template. When non-empty it
                takes precedence and mode/level/tone/limit_words are ignored.
        """
        self.role = role
        self.role_name = self._ROLE_NAMES.get(role, "helpful assistant")
        self.limit_words = limit_words
        self.__messages = []
        self.mode = mode
        self.level = level
        self.tone = tone
        self.pointer_template = pointer_template
        # Number of completed rounds in the current multi-turn conversation.
        self.interactive_round = 0

    def __generate_prompt_template(self, message):
        """Wrap *message* with the configured style instructions.

        A non-empty ``pointer_template`` overrides every other style option.
        """
        if self.pointer_template:
            return f"{message},{self.pointer_template}"
        msg = f"{message},回答字数限制在{self.limit_words}个字以内"
        msg += self._MODE_HINTS.get(self.mode, "")
        msg += self._LEVEL_HINTS.get(self.level, "")
        msg += self._TONE_HINTS.get(self.tone, "")
        return msg

    def __generate_messages(self, msg, is_multipart=False):
        """Append the already-templated user message *msg* to the history.

        Single-shot chats (``is_multipart=False``) start from a fresh
        system+user pair; multi-turn chats keep history, resetting it every
        ``_MAX_ROUNDS`` rounds.

        Fixes vs. the original: *msg* is appended as-is — previously the
        word-limit instruction was appended a second time here (duplicating
        it, and applying it even when ``pointer_template`` should suppress
        it). Single-shot chats now always carry the system message, even if
        ``interactive_round`` is non-zero from an earlier multi-turn session.
        """
        system_msg = {'role': 'system', 'content': f'You are a {self.role_name}.'}
        if not is_multipart:
            self.__messages = [system_msg]
        else:
            if self.interactive_round >= self._MAX_ROUNDS:
                # Cap the conversation length; start over with fresh context.
                self.interactive_round = 0
                self.__messages = []
            if self.interactive_round == 0:
                self.__messages.append(system_msg)
        self.__messages.append({'role': 'user', 'content': msg})

    def __call_generation(self, stream=False):
        """Invoke qwen-turbo on the current history with shared sampling settings.

        With ``stream=True`` the call yields incremental chunks (each chunk
        carries only the newly generated text).
        """
        extra = {'stream': True, 'incremental_output': True} if stream else {}
        return Generation.call(model="qwen-turbo",
                               messages=self.__messages,
                               # Fresh seed per call so replies vary
                               # (the API default seed is fixed at 1234).
                               seed=random.randint(1, 10000),
                               temperature=0.8,
                               top_p=0.8,
                               top_k=50,
                               # Return responses in "message" format.
                               result_format='message',
                               **extra)

    def __consume_stream(self, responses):
        """Print streamed chunks as they arrive and return the full reply.

        On an API error, returns the error message instead of partial text.
        """
        full_content = ""
        for response in responses:
            if response.status_code == HTTPStatus.OK:
                chunk = response.output.choices[0].message.content
                full_content += chunk
                print(chunk, end="")
            else:
                full_content = response.message
                break
        print()
        return full_content

    def chat(self, message):
        """Single-shot chat: return the reply text, or the API error message."""
        self.__generate_messages(self.__generate_prompt_template(message))
        response = self.__call_generation()
        if response.status_code == HTTPStatus.OK:
            return response.output.choices[0].message.content
        return response.message

    def multipart_chat(self, message):
        """Multi-turn chat: keeps history so the model sees prior rounds.

        Returns the reply text, or the API error message on failure (in which
        case the round counter and history are left unchanged).
        """
        self.__generate_messages(self.__generate_prompt_template(message), True)
        response = self.__call_generation()
        if response.status_code != HTTPStatus.OK:
            return response.message
        self.interactive_round += 1
        result = response.output.choices[0].message.content
        # Record the assistant turn so the next round has full context.
        self.__messages.append({"role": "assistant", "content": result})
        return result

    def multipart_stream_chat(self, message):
        """Streaming multi-turn chat: prints chunks, returns the full reply."""
        self.__generate_messages(self.__generate_prompt_template(message), True)
        full_content = self.__consume_stream(self.__call_generation(stream=True))
        self.interactive_round += 1
        # Record the assistant turn so the next round has full context.
        self.__messages.append({"role": "assistant", "content": full_content})
        return full_content

    def stream_chat(self, message):
        """Streaming single-shot chat: prints chunks, returns the full reply."""
        self.__generate_messages(self.__generate_prompt_template(message))
        return self.__consume_stream(self.__call_generation(stream=True))