# encoding:utf-8

from typing import Iterable, Iterator

import openai

import config

user_session = dict()

model_engine = openai.Model(engine='text-davinci-002')


# OpenAI conversation-model API wrapper (working)
class Chat:
    def __init__(self):
        openai.api_key = config.config.get("api_key")
        proxy = config.config.get('proxy')
        if proxy:
            openai.proxy = proxy


    async def chat_with_openai(message):
        response = openai.Completion.create(
            model=config.config.get('davinci-model'),  # 对话模型的名称
            prompt=message,
            temperature=0.9,  # 值在[0,1]之间，越大表示回复越具有不确定性
            # max_tokens=4096,  # 回复最大的字符数
            top_p=1,
            frequency_penalty=0.0,  # [-2,2]之间，该值越大则更倾向于产生不同的内容
            presence_penalty=0.0,  # [-2,2]之间，该值越大则更倾向于产生不同的内容
            stop=["\n\n\n"],
            stream=True
        )
        for chunk in response:
            yield chunk['choices'][0]['text']
        yield "data: {}\n\n".format("end")


    def reply(self, query, context=None):
        # acquire reply content
        if not context or not context.get('type') or context.get('type') == 'TEXT':
            from_user_id = context['from_user_id']
            if query == '#清除记忆':
                Session.clear_session(from_user_id)
                return '记忆已清除'

            new_query = Session.build_session_query(query, from_user_id)

            # if context.get('stream'):
            #     # reply in stream
            #     return self.reply_text_stream(query, new_query, from_user_id)

            reply_content = self.reply_text_stream(query, new_query, from_user_id, 0)
            return reply_content


    def reply_text(self, query, user_id, retry_count=0):
        try:
            new_query = Session.build_session_query(query, user_id)
            response = openai.ChatCompletion.create(
                model=config.config.get("text-model"),  # 对话模型的名称
                messages=new_query,
                temperature=0.9,  # 值在[0,1]之间，越大表示回复越具有不确定性
                top_p=1,
                frequency_penalty=0.0,  # [-2,2]之间，该值越大则更倾向于产生不同的内容
                presence_penalty=0.0,  # [-2,2]之间，该值越大则更倾向于产生不同的内容
            )
            reply_content = response.choices[0]['message']['content']
            used_token = response['usage']['total_tokens']
            if reply_content:
                # save conversation
                Session.save_session(query, reply_content, user_id, used_token)
            return response.choices[0]['message']['content']
        except openai.error.RateLimitError as e:
            # rate limit exception
            if retry_count < 1:
                return self.reply_text(query, user_id, retry_count + 1)
            else:
                return "提问太快啦，请休息一下再问我吧"
        except openai.error.APIConnectionError as e:
            return "我连接不到网络，请稍后重试"
        except openai.error.Timeout as e:
            return "我没有收到消息，请稍后重试"
        except Exception as e:
            Session.clear_session(user_id)
            return "请再问我一次吧"


    def reply_text_stream(self, query, new_query, user_id, retry_count=0):
        try:
            res = openai.Completion.create(
                model=config.config.get('davinci-model'),  # 对话模型的名称
                prompt=query,
                temperature=0.9,  # 值在[0,1]之间，越大表示回复越具有不确定性
                # max_tokens=4096,  # 回复最大的字符数
                top_p=1,
                frequency_penalty=0.0,  # [-2,2]之间，该值越大则更倾向于产生不同的内容
                presence_penalty=0.0,  # [-2,2]之间，该值越大则更倾向于产生不同的内容
                stop=["\n\n\n"],
                stream=True
            )
            return _process_reply_stream(query, res, user_id)

        except openai.error.RateLimitError as e:
            # rate limit exception
            if retry_count < 1:
                return self.reply_text_stream(query, user_id, retry_count + 1)
            else:
                return "提问太快啦，请休息一下再问我吧"
        except openai.error.APIConnectionError as e:
            return "我连接不到网络，请稍后重试"
        except openai.error.Timeout as e:
            return "我没有收到消息，请稍后重试"
        except Exception as e:
            # unknown exception
            Session.clear_session(user_id)
            return "请再问我一次吧"


    def chat_with_openai_on_text(message):
        response = openai.ChatCompletion.create(
            model=config.config.get("text-model"),  # 对话模型的名称
            messages=message,
            temperature=0.9,  # 值在[0,1]之间，越大表示回复越具有不确定性
            top_p=1,
            frequency_penalty=0.0,  # [-2,2]之间，该值越大则更倾向于产生不同的内容
            presence_penalty=0.0,  # [-2,2]之间，该值越大则更倾向于产生不同的内容
        )
        return "data: {}\n\n".format(response.choices[0]['message']['content'])


    def generate(message: str):
        response = openai.Completion.create(
            model=config.config.get("davinci-model"),  # 对话模型的名称
            prompt=message,
            temperature=0.9,  # 值在[0,1]之间，越大表示回复越具有不确定性
            # max_tokens=4096,  # 回复最大的字符数
            top_p=1,
            frequency_penalty=0.0,  # [-2,2]之间，该值越大则更倾向于产生不同的内容
            presence_penalty=0.0,  # [-2,2]之间，该值越大则更倾向于产生不同的内容
            stop=["\n\n"],
            stream=True
        )
        for chunk in response:
            yield "data: {}\n\n".format(chunk['choices'][0]['text'])

        yield "data: {}\n\n".format("end")


def _process_reply_stream(
        query: str,
        reply: dict,
        user_id: str
) -> str:
    full_response = ""
    for response in reply:
        if response.get("choices") is None or len(response["choices"]) == 0:
            raise Exception("OpenAI API returned no choices")
        if response["choices"][0].get("finish_details") is not None:
            break
        if response["choices"][0].get("text") is None:
            raise Exception("OpenAI API returned no text")
        if response["choices"][0]["text"] == "<|endoftext|>":
            break
        full_response += response["choices"][0]["text"]
        print("回复消息：" + response["choices"][0]["text"])
        yield "data: {}\n\n".format(response["choices"][0]["text"])
    print("总回复消息：" + full_response)
    Session.save_session(query, full_response, user_id)
    yield "data: {}\n\n".format("end")


class Session(object):
    @staticmethod
    def build_session_query(query, user_id):
        '''
        build query with conversation history
        e.g.  [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Who won the world series in 2020?"},
            {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
            {"role": "user", "content": "Where was it played?"}
        ]
        :param query: query content
        :param user_id: from user id
        :return: query content with conversaction
        '''
        session = user_session.get(user_id, [])
        if len(session) == 0:
            system_prompt = "你好，我是ChatGPT，一个基于 GPT-3.5 架构的大型语言模型，由 OpenAI 训练而成。我可以进行自然语言处理，包括问答、语言翻译、文本生成等任务。我能够提供广泛的知识，并且可以与用户进行交互，以回答各种问题。无论你需要了解哪方面的知识，我都会尽力为你提供帮助"
            system_item = {'role': 'system', 'content': system_prompt}
            session.append(system_item)
            user_session[user_id] = session
        user_item = {'role': 'user', 'content': query}
        session.append(user_item)
        return session

    @staticmethod
    def save_session(query, answer, user_id, used_tokens=0):
        max_tokens = 1000
        if not max_tokens or max_tokens > 4000:
            # default value
            max_tokens = 1000
        session = user_session.get(user_id)
        if session:
            # append conversation
            gpt_item = {'role': 'assistant', 'content': answer}
            session.append(gpt_item)

        if used_tokens > max_tokens and len(session) >= 3:
            # pop first conversation (TODO: more accurate calculation)
            session.pop(1)
            session.pop(1)

    @staticmethod
    def clear_session(user_id):
        user_session[user_id] = []
