# encoding: utf-8
"""
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    created by lane.chang on '28/05/2024'
    comment: 知识库相关
"""
import os
from io import StringIO

import numpy as np
import ujson
from openai import OpenAI

from config import Config
from project.core.log import Colors
from project.lib.common import session_maker, get_redis, logger
from project.lib.decorator import retry
from project.middleware.coze import Coze
from project.model.basic import KlItem
from project.model.basic import ChatMessage
from project.model.chatgpt import KnowledgeVector, Knowledge
from project.model.chatgpt import ChatGpt


# Maximum number of characters of matched knowledge text that may be
# accumulated into the chat context (used as a rough token budget).
CONTEXT_TOKEN_LIMIT = 3000

# SECURITY(review): an API key was hard-coded here and is now committed to
# source history — it should be rotated. The environment variables below take
# precedence; the literals remain only as a backward-compatible fallback.
api_key = os.environ.get('OPENAI_API_KEY', 'sk-D76oKfRv1z4CaIvVGQBTrC9m9mT5xDGOkp24XUJNBdixb3WY')
base_url = os.environ.get('OPENAI_BASE_URL', 'https://api.chatanywhere.tech/v1')
model = 'text-embedding-ada-002'


class KnowledgeService:
    """Knowledge-base service.

    Stores question/answer pairs together with their OpenAI embeddings
    (``KnowledgeVector`` / ``Knowledge`` rows) and retrieves the entries most
    similar to a query text to build chat context.
    """

    @staticmethod
    def _new_client():
        """Build an OpenAI client from the module-level credentials."""
        return OpenAI(api_key=api_key, base_url=base_url)

    @staticmethod
    def _build_context(sources, embeddings, text_embedding, get_text, count):
        """Rank entries by dot-product similarity and keep the best matches.

        :param sources: list of dicts describing each knowledge entry
        :param embeddings: embedding vectors parallel to ``sources``
        :param text_embedding: embedding vector of the query text
        :param get_text: callable mapping a source dict to its display text
        :param count: maximum number of matches to return
        :return: at most ``count`` source dicts whose concatenated text stays
                 within CONTEXT_TOKEN_LIMIT characters
        """
        document_similarities = sorted(
            ((np.dot(text_embedding, doc_embedding), doc_index)
             for doc_index, doc_embedding in enumerate(embeddings)),
            reverse=True, key=lambda pair: pair[0]
        )

        ctx = ""
        matched = []
        for _score, doc_index in document_similarities:
            candidate = ctx + " " + get_text(sources[doc_index])
            if len(candidate) > CONTEXT_TOKEN_LIMIT:  # cap total context size
                break
            ctx = candidate
            matched.append(sources[doc_index])

        # Never return more than `count` entries
        return matched[:count]

    @staticmethod
    def add_knowledge(organization_code, question, answer):
        """Insert one Q/A pair (with its embedding) for an organization.

        Idempotent: does nothing if an identical record already exists.

        :param organization_code: owning organization
        :param question: question text
        :param answer: answer text
        :return: None
        """
        with session_maker() as session:
            knowledge = session.query(KnowledgeVector).filter(KnowledgeVector.organization_code == organization_code,
                                                              KnowledgeVector.question == question,
                                                              KnowledgeVector.answer == answer).first()
            if knowledge:
                return  # already stored, nothing to do

            text = f'{question} {answer}'
            embedding = KnowledgeService._new_client().embeddings.create(input=[text], model=model).data[0].embedding

            # Insert the new record
            KnowledgeVector.create_modify(
                session,
                organization_code=organization_code,
                question=question,
                answer=answer,
                embedding=embedding
            )

    @staticmethod
    def retrieve(session, text, organization_code, count=3):
        """Retrieve the organization's knowledge entries best matching ``text``.

        :param session: DB session
        :param text: query text
        :param organization_code: organization whose knowledge base is searched
        :param count: maximum entries to return, default 3
        :return: list of {'question': ..., 'answer': ...} dicts; [] on any error
        """
        try:
            if not organization_code:
                return []

            knowledges = session.query(KnowledgeVector).filter(KnowledgeVector.organization_code == organization_code).all()
            if not knowledges:
                return []
            sources = [v.dict('question', 'answer') for v in knowledges]
            embeddings = [v.embedding for v in knowledges]

            client = KnowledgeService._new_client()

            # Embed the query, retrying up to 3 times on API failure
            # (iterative form of the previous recursive helper).
            text_embedding = None
            for attempt in range(1, 4):
                try:
                    text_embedding = client.embeddings.create(input=[text], model=model).data[0].embedding
                    break
                except Exception as e:
                    logger.critical(e, exc_info=True)
                    logger.info(f'=========重试{attempt}次========', font_color=Colors.PURPLE.value)
            if not text_embedding:
                return []

            return KnowledgeService._build_context(
                sources, embeddings, text_embedding,
                lambda src: src['question'] + src['answer'], count)

        except Exception as ex:
            logger.critical(ex, exc_info=True)
            # BUGFIX: previously fell through and returned None here, which
            # crashed callers that iterate the result; always return a list.
            return []

    @staticmethod
    def get_knowledge_info(session, knowledge_key):
        """Return all stored sources and embeddings for a knowledge base.

        :param session: DB session
        :param knowledge_key: organization code identifying the knowledge base
        :return: {'sources': [...], 'embeddings': [...]} ordered by most
                 recently updated first
        """
        knowledges = session.query(KnowledgeVector).filter(KnowledgeVector.organization_code == knowledge_key).order_by(KnowledgeVector.update_time.desc()).all()

        return {
            'sources': [v.dict('question', 'answer') for v in knowledges],
            'embeddings': [v.embedding for v in knowledges]
        }

    @staticmethod
    def get_knowledge_file_object(session, knowledge_key):
        """Serialize a knowledge base to an in-memory JSON file object.

        :param session: DB session
        :param knowledge_key: organization code identifying the knowledge base
        :return: StringIO positioned at the start, ready for reading
        """
        knowledge_info = KnowledgeService.get_knowledge_info(session, knowledge_key=knowledge_key)

        file_object = StringIO()
        file_object.write(ujson.dumps(knowledge_info, ensure_ascii=False, indent=4))

        # Rewind so callers can read from the beginning
        file_object.seek(0)

        return file_object

    @staticmethod
    def flush_knowledge_streaming(session, knowledge_key, kls: list[KlItem]):
        """Synchronize the stored knowledge base with ``kls`` (streaming).

        Inserts entries present in ``kls`` but not in the DB, deletes DB
        entries absent from ``kls``, and yields a progress message per change.

        :param session: DB session
        :param knowledge_key: organization code identifying the knowledge base
        :param kls: desired full set of Q/A items
        :return: generator of progress message strings
        """
        rep_kl_map = {(v.question, v.answer) for v in kls}

        knowledges = session.query(KnowledgeVector).filter(KnowledgeVector.organization_code == knowledge_key).all()
        exists_kl_map = {(v.question, v.answer): v for v in knowledges}

        # Hoisted: one client for the whole loop instead of one per item
        client = KnowledgeService._new_client()

        for kl in kls:
            if (kl.question, kl.answer) in exists_kl_map:
                continue

            text = f'{kl.question} {kl.answer}'
            embedding = client.embeddings.create(input=[text], model=model).data[0].embedding

            # Insert the new record
            KnowledgeVector.create_modify(
                session,
                organization_code=knowledge_key,
                question=kl.question,
                answer=kl.answer,
                embedding=embedding
            )

            yield f'插入一条 问题: {kl.question} 答案: {kl.answer} \n'

        for kl in knowledges:
            if (kl.question, kl.answer) in rep_kl_map:
                continue
            # Remove entries no longer present in the desired set
            KnowledgeVector.delete_by_id(session, kl.id)

            yield f'删除一条 问题: {kl.question} 答案: {kl.answer} \n'

        yield '知识库更新已完成 \n'

    @staticmethod
    async def flush_rwt_knowledge_streaming(session):
        """Refresh the RWT knowledge base from a Coze workflow (streaming).

        Sets a short-lived redis flag while processing to signal an in-flight
        refresh, then delegates to :meth:`flush_knowledge_streaming`.

        :param session: DB session
        :return: async generator of progress message strings
        """
        rds = get_redis()
        rds_key = 'knowledge_flush_rwt_streaming'
        rds.setex(rds_key, 60, 'processing')

        try:
            result = await Coze.run_workflow(Config.COZE_OPTIONS['get_feishu_rwt_knowledge'])

            values_json_str = result['data']['valueRange']['valuesJsonString']
            values_json = ujson.loads(values_json_str)
            # Skip the header row (idx 0) and rows missing either column
            kls = [KlItem(question=v[0], answer=v[1]) for idx, v in enumerate(values_json) if idx != 0 and v[0] and v[1]]

            for message in KnowledgeService.flush_knowledge_streaming(session, knowledge_key='86', kls=kls):
                yield message
        finally:
            # BUGFIX: clear the processing flag even when the workflow or
            # sync raises, so a failed run does not block the next one.
            rds.delete(rds_key)

    @staticmethod
    @retry()
    async def get_embedding(text):
        """Return the embedding vector for ``text``.

        :param text: text to embed
        :return: embedding vector; [] when the API yields a falsy result
        """
        client = KnowledgeService._new_client()
        return client.embeddings.create(input=[text], model=model).data[0].embedding or []

    @staticmethod
    async def flush_knowledge(session, organization_code, text, knowledge_detail_code=None):
        """Create or update a single ``Knowledge`` row and re-embed its text.

        :param session: DB session
        :param organization_code: owning organization
        :param text: knowledge text to store and embed
        :param knowledge_detail_code: optional detail code; when it matches an
               existing row that row is updated, otherwise a new row is created
        :return: the persisted Knowledge instance
        """
        knowledge = None
        if knowledge_detail_code is not None:
            knowledge = session.query(Knowledge).filter(Knowledge.knowledge_detail_code == knowledge_detail_code).first()
        if not knowledge:
            knowledge = Knowledge()
            session.add(knowledge)

        knowledge.knowledge_detail_code = knowledge_detail_code
        knowledge.organization_code = organization_code
        knowledge.text = text
        knowledge.text_embedding = await KnowledgeService.get_embedding(text)  # embed the text

        session.commit()

        return knowledge

    @staticmethod
    async def delete_knowledge(session, knowledge_detail_code):
        """Delete the knowledge row matching ``knowledge_detail_code``.

        :param session: DB session
        :param knowledge_detail_code: detail code of the row to delete
        :return: None
        """
        session.query(Knowledge).filter(Knowledge.knowledge_detail_code == knowledge_detail_code).delete()
        session.commit()

    @staticmethod
    async def delete_knowledge_batch(session, knowledge_detail_codes):
        """Delete all knowledge rows whose detail code is in the given list.

        :param session: DB session
        :param knowledge_detail_codes: iterable of detail codes to delete
        :return: None
        """
        session.query(Knowledge).filter(Knowledge.knowledge_detail_code.in_(knowledge_detail_codes)).delete()
        session.commit()

    @staticmethod
    async def get_knowledge_match_list(session, text, organization_code=None, knowledge_detail_codes=None, count=3):
        """Return the stored ``Knowledge`` texts best matching ``text``.

        :param session: DB session
        :param text: query text
        :param organization_code: optional filter on owning organization
        :param knowledge_detail_codes: optional filter on detail codes
        :param count: maximum entries to return, default 3
        :return: list of {'knowledge_detail_code': ..., 'text': ...} dicts
        """
        query = session.query(Knowledge)
        if organization_code is not None:
            query = query.filter(Knowledge.organization_code == organization_code)
        if knowledge_detail_codes is not None:
            query = query.filter(Knowledge.knowledge_detail_code.in_(knowledge_detail_codes))
        knowledges = query.all()
        if not knowledges:
            return []

        sources = [v.dict('knowledge_detail_code', 'text') for v in knowledges]
        embeddings = [v.text_embedding for v in knowledges]

        # Embed the query and rank the stored entries against it
        client = KnowledgeService._new_client()
        text_embedding = client.embeddings.create(input=[text], model=model).data[0].embedding

        return KnowledgeService._build_context(
            sources, embeddings, text_embedding,
            lambda src: src['text'], count)

    @staticmethod
    async def do_knowledge_chat_streaming(session, user_message, organization_code='', knowledge_detail_codes=None, chat_mode='normal', chat_messages: list[ChatMessage] = None):
        """Answer a user message with knowledge-base context (streaming).

        :param session: DB session
        :param user_message: the user's message
        :param organization_code: optional knowledge-base filter
        :param knowledge_detail_codes: optional knowledge-base filter
        :param chat_mode: 'normal' or 'simple' (short replies)
        :param chat_messages: conversation history
        :return: async response stream from the LLM
        """
        # Conversation history
        user_memories = [str(v) for v in chat_messages or []]

        # Background knowledge matched against the user's message
        knowledges = await KnowledgeService.get_knowledge_match_list(session, user_message, organization_code, knowledge_detail_codes)
        related_knowledge = [v['text'] for v in knowledges]

        instruction = """
        ###Instruction
        请结合你掌握的背景知识以及你和用户的对话历史，回复用户的对话。{simple_config}

        你们的对话历史：{user_memories}
        
        用户对你说：{user_message}

        针对这段对话你所掌握的背景知识为：{related_knowledge}

        ###Output
        你会对用户说：
        """
        simple_config = ''
        if chat_mode == 'simple':
            # BUGFIX: a premature `instruction.format(simple_config=...)` here
            # raised KeyError on the template's remaining placeholders; the
            # template is now formatted exactly once below.
            simple_config = '注意请尽量简短回复，控制内容在100个字以内。'
        instruction = instruction.format(user_memories=user_memories, related_knowledge=related_knowledge, simple_config=simple_config, user_message=user_message)
        logger.info(f'智能体对话: \n{instruction}', font_color=Colors.PURPLE.value)

        chat_gpt = ChatGpt(chat_model_name=Config.DEFAULT_MODEL)
        response_stream = chat_gpt.llm_async_streaming(user_text=instruction)

        return response_stream


if __name__ == "__main__":
    # Ad-hoc manual check: query organization '86' for entries matching an
    # emergency-help question and print each matched Q/A record.
    with session_maker() as session:
        matches = KnowledgeService.retrieve(session,'如果遇到紧急情况，应如何求助？', '86')

        for match in matches:
            print(match)
