import asyncio
import json
import uuid
from typing import List, Optional

# NOTE: ``BaseModel`` here is the pydantic BaseModel re-exported by the openai package.
from openai import BaseModel, OpenAI
from pydantic import Field

from app.business.milvus_content import FileListBiz
from app.business.model_config import get_model_config
from app.business.session_msg import SessionMsgCreateBiz, SessionMsgListBiz
from app.initializer import init_config
from app.utils.chatglm import chat_zhipu_ai
from app.utils.chatgpt import chat_gpt_azure, chat_gpt_azure_msg
from app.utils.milvus_do import MilvusDo
from app.utils.upload_in_milvus_util import search_metedata

# NOTE(review): module-level placeholder (value is just "\n"); it is shadowed
# by the local ``fileList`` built inside ``chat_rag`` — presumably dead,
# confirm no external importer reads it before removing.
fileList = '''
'''


class rag_chatDetailMdl(BaseModel):
    """Request payload for one RAG chat turn.

    ``collection`` is optional: when it is ``None`` or ``""`` the question is
    sent to the model without knowledge-base retrieval (see the
    ``self.collection is None or self.collection == ""`` checks downstream).
    """

    content: str = Field(..., title="聊天内容", description="聊天内容")
    # Was annotated ``str`` with ``default=None`` — under pydantic v2 an
    # explicit ``collection=None`` then fails validation even though the
    # business code treats None as "no knowledge base". ``Optional[str]``
    # matches the actual contract and is backward compatible.
    collection: Optional[str] = Field(default=None, title="知识库", description="知识库")
    session_id: int = Field(..., title="会话ID", description="会话ID")
    history_num: int = Field(default=5, title="会话条数", description="会话条数")
    history_status: bool = Field(default=True, title="开启历史会话", description="开启历史会话")
    model: str = Field(default="qwen", title="模型 qwen 或 deepseek ", description="模型 qwen 或 deepseek")


class session_entity(BaseModel):
    """Lightweight holder for a user's chat-session identifiers and content."""

    # Every field defaults to the empty string, so a bare ``session_entity()``
    # is a valid instance.
    userName: str = Field(default="")
    chat_session: str = Field(default="")
    chat_content: str = Field(default="")


# class rag_chatDetailBiz(rag_chatDetailMdl):
#
#     async def chat_rag(self):
#
#         r = get_redis_connection()
#         if self.chat_session == "":
#             r = get_redis_connection()
#             self.chat_session = self.content + ":" + str(uuid.uuid4())
#             r.rpush(self.userName, self.chat_session)
#         # 查询聊天记录的key
#         session_key = self.userName + ':' + self.chat_session
#         # 判断聊天记录是否为空 为空存入     {
#         #       "role": "system",
#         #       "content": "You are a helpful assistant."
#         #     },
#         if not r.exists(session_key):
#             # 初始化聊天记录
#             r.rpush(session_key, json.dumps({"role": "system", "content": "You are a helpful assistant."}))
#         # 获取聊天记录
#         r.rpush(session_key, json.dumps({"role": "user", "content": self.content}))
#         data = r.lrange(session_key, 0, -1)
#         if len(data) < 6:
#
#             json_objects = [json.loads(item) for item in data]
#         else:
#             # 获取第一个元素
#             first_element = data[:1]
#
#             # 获取最后四个元素
#             last_four_elements = data[-4:]
#
#             # 拼接成新列表
#             data = first_element + last_four_elements
#             json_objects = [json.loads(item) for item in data]
#         print("!!!!!!!!!")
#         print(json_objects)
#
#         if self.collection is None or self.collection == "":
#             chat_content = self.content
#             result = ""
#             if result == "":
#                 result = chat_qwen(json_objects)
#             r.rpush(session_key, json.dumps({"role": "assistant", "content": result}))
#             return result
#         else:
#             return "collection is not None"


class rag_chatDetailBiz_stream(rag_chatDetailMdl):

    def rag_search_milvus(self, searchName, memory_name, limit=10, filter=None):
        """Search the Milvus collection ``memory_name`` for ``searchName``.

        Thin wrapper over ``MilvusDo.search_by_searchName``; ``limit`` caps the
        number of hits and ``filter`` is an optional Milvus filter expression.
        """
        client = MilvusDo()
        return client.search_by_searchName(
            searchName=searchName,
            collectionName=memory_name,
            limit=limit,
            filter=filter,
        )

    async def chat_rag(self):
        """Stream a RAG-augmented chat answer as Server-Sent-Event frames.

        Steps:
          1. Load (and, for a new session, seed) the persisted chat history.
          2. Persist the incoming user message.
          3. When ``self.collection`` is set, rewrite the question from the
             history, then either answer a file-list query or retrieve
             knowledge points from Milvus and wrap them in a grounding prompt.
          4. Stream the completion from the selected model ("qwen" or
             "deepseek"), yielding ``data: {"text": ...}`` frames.
          5. Persist the assistant answer plus retrieval hits and yield a
             final ``session_msg_id: <id>`` frame.

        Raises:
            ValueError: when ``self.model`` is neither "qwen" nor "deepseek"
                (previously this fell through to a NameError on ``completion``).
        """
        result_list = []
        session_create_entity = SessionMsgCreateBiz(session_id=self.session_id, content="")
        session_create_entity.session_id = self.session_id
        session_get_list = SessionMsgListBiz()
        session_get_list.session_id = self.session_id
        # Load the stored history for this session.
        historyList = await session_get_list.lst()
        # Seed an initial system message when the session is brand new.
        if historyList is None or len(historyList) == 0:
            session_create_entity.content = json.dumps(
                {"role": "system", "content": "You are a helpful assistant."},
                ensure_ascii=False)
            await session_create_entity.create()

        # Re-read the history (now guaranteed to hold the system message).
        chat_history = await session_get_list.lst()
        if len(chat_history) < self.history_num + 1:
            chat_msg = [json.loads(item["content"]) for item in chat_history]
            print("chat_msg:", chat_msg)
        else:
            # Keep the system message plus the most recent entries.
            data = chat_history[:1] + chat_history[-(self.history_num - 1):]
            chat_msg = [json.loads(item["content"]) for item in data]

        # Persist the incoming user message.
        session_create_entity.content = json.dumps(
            {"role": "user", "content": self.content},
            ensure_ascii=False)
        await session_create_entity.create()

        if self.collection is None or self.collection == "":
            # No knowledge base selected: plain chat.
            chat_msg.append({"role": "user", "content": self.content})
        else:
            # Rewrite the question into a self-contained one for retrieval.
            chat_msg_history = json.dumps(chat_msg, ensure_ascii=False)
            chat_msg_history_prompt_zip = '''
                    你是一个文档解析专家，请将以下聊天记录进行解析，根据聊天记录的上下文和用户提问内容分析用户意图。最后将用户提问内容重新修改
                    修改后的用户提问内容可以直接一句话不需要上下问就可以清晰表达用户意图，方便向量匹配和意图识别。
                    聊天记录是：''' + chat_msg_history + '''
                    用户提问是：''' + self.content + '''
                    直接返回用户提问内容，不要返回其他内容,什么语言输入用什么语言返回。
                 '''
            prompt_chat_msg_history_prompt_zip = chat_zhipu_ai(chat_msg_history_prompt_zip, [])
            print("压缩后的用户提问内容:", prompt_chat_msg_history_prompt_zip)

            # Ask the LLM whether the question targets the file list itself.
            chat_content = prompt_chat_msg_history_prompt_zip
            prompt_check = '''
                    判断这个问题是否是对文件列表的查询 如果是对文件列表的询问返回 “是” 如果不是返回 “否”
                    问题是：''' + chat_content + '''
                    例如：住建部发布了哪些城市更新的文件？ 返回 ”是“
                 '''
            prompt_check_file_find = chat_zhipu_ai(prompt_check, [])

            print("判断答案!!!!")
            print(prompt_check_file_find)
            if "是" in prompt_check_file_find:
                # File-list query: answer from the collection's file names.
                fileList = await FileListBiz().fileNameList(milvus_index=self.collection)
                # Render as "1. name;2. name;..." for the prompt.
                fileList = ";".join([f"{i + 1}. {item['file_name']}" for i, item in enumerate(fileList)])
                print("fileList!!!!!!!!!!!!!!:", fileList)
                if fileList == "":
                    fileList = "经严格分析，未发现相关文件"
                prompt_in = '''
                        你是一个严谨的文件名分析专家，请根据以下文件名列表回答我的问题：
                        文件名列表：
                        ''' + fileList + '''
                        问题：''' + chat_content + '''
                        要求：
                        1. 严格基于文件名文本进行分析，不得猜测文件内容
                        2. 按相关性排序输出结果（最相关在前）
                        3. 每个结果必须包含：
                           - 匹配的文件名
                           - 匹配关键词
                           - 相关性说明（精确匹配/语义关联）
                        4. 判断标准：
                            精确匹配：文件名包含问题关键词的完整词汇
                            语义关联：文件名包含问题关键词的同义词、近义词或专业术语变体
                            排除标准：仅凭后缀名、通用词（如"报告""数据"）的匹配
                        5. 无相关文件时返回："经严格分析，未发现相关文件"
                        '''
                result_list.append({
                    "index": 1,
                    "fileName": "文件列表",
                    "content": fileList
                })
                chat_msg.append({"role": "user", "content": prompt_in})
            else:
                # Knowledge query: retrieve the top-3 chunks from Milvus.
                results = self.rag_search_milvus(searchName=chat_content,
                                                 memory_name=self.collection, limit=3)
                re_str = "".join(
                    "知识点：" + "文件名" + res.fileName + "内容：" + res.text + "\n" + ";"
                    for res in results)
                for i, res in enumerate(results, start=1):
                    result_list.append({
                        "index": i,
                        "fileName": res.fileName,
                        "content": res.text
                    })
                prompt_in = '''
                      你是一个政策专家。现在需要
                      从文档知识点中 +''' + re_str + '''+ 中查找问题的答案 +''' + chat_content + '''+ 
                      要求：如果找到答案，仅使用文档的陈述来回答问题；如果未找到相关知识则回答"文档中未描述相关知识请联系管理员添加知识库"；
                      只根据知识库回答问题不要擅自编写修改内容，要求回答严谨，答案正确，不要漏掉知识点中的任何细节内容！
                      不要重复问题，直接开始回答。
                      '''
                chat_msg.append({"role": "user", "content": prompt_in})

        # Without history, send only the freshly appended user message.
        if not self.history_status:
            chat_msg = chat_msg[-1:]

        if self.model == "qwen":
            model_cfg = get_model_config(model_name="qwen")
            model_id = "qwen-plus"
        elif self.model == "deepseek":
            print("！！！！！！！！！！！！！！！！！！！！！！！使用deepseek模型！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！")
            model_cfg = get_model_config(model_name="deepseek")
            model_id = "deepseek-chat"
        else:
            # Fail fast: previously an unknown model crashed later with a
            # NameError on ``completion``.
            raise ValueError(f"unsupported model: {self.model!r}")
        # OpenAI-compatible client against the configured endpoint.
        client = OpenAI(
            api_key=model_cfg.model_api_key,
            base_url=model_cfg.model_url,
        )
        completion = client.chat.completions.create(
            model=model_id,
            # Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
            messages=chat_msg,
            stream=True,
            stream_options={"include_usage": True}
        )

        async def async_stream_wrapper(sync_stream):
            # Pull each chunk on a worker thread so the blocking read of the
            # synchronous OpenAI stream does not stall the event loop (the old
            # version iterated the stream on the loop thread and only wrapped
            # the already-fetched chunk in an executor call).
            loop = asyncio.get_running_loop()
            stream_iter = iter(sync_stream)
            sentinel = object()
            while True:
                chunk = await loop.run_in_executor(None, next, stream_iter, sentinel)
                if chunk is sentinel:
                    break
                yield chunk

        content = ""
        async for chunk in async_stream_wrapper(completion):
            # The usage-only trailing chunk (include_usage) carries no choices.
            if len(chunk.choices) == 0:
                continue
            # ``delta.content`` may be None on role-only chunks; guard it.
            delta_text = chunk.choices[0].delta.content or ""
            content += delta_text
            print(delta_text)
            json_ = json.dumps({'text': delta_text}, ensure_ascii=False)
            yield f"data: {json_}\n\n"

        # Persist the assistant answer together with the retrieval hits.
        session_create_entity.content = json.dumps(
            {"role": "assistant", "content": content},
            ensure_ascii=False)
        if content == "文档中未描述相关知识请联系管理员添加知识库":
            # Nothing was found in the documents: no relevancy payload.
            session_create_entity.content_relevancy = ""
        else:
            session_create_entity.content_relevancy = json.dumps(result_list, ensure_ascii=False)

        session_msg_id = await session_create_entity.create()
        print("session_msg_id:", session_msg_id)
        yield f"session_msg_id: {session_msg_id}\n\n"

    async def chat_rag_poc(self):
        """Non-streaming POC variant of ``chat_rag`` for the SFC/SFAT demo.

        Rewrites Tribunal->Court in the question, classifies it as metadata
        "检索" (retrieval) vs "问答" (Q&A), builds the matching prompt
        (metadata search, a hard-coded local judgment file, or Milvus RAG over
        the "sfat" collection), calls the selected model synchronously and
        returns ``{"reason": answer, "from": deduplicated_download_links}``.
        """
        chat_content_base = self.content.encode("utf-8").decode("utf-8")
        result_list = []
        result_list2 = []
        import json
        session_create_entity = SessionMsgCreateBiz(session_id=self.session_id, content="")
        session_create_entity.session_id = self.session_id
        session_get_list = SessionMsgListBiz()
        session_get_list.session_id = self.session_id
        # Load the stored chat history for this session.
        historyList = await session_get_list.lst()
        # Seed an initial system message when the history is empty or None.
        if historyList is None or len(historyList) == 0:
            session_create_entity.content = json.dumps({"role": "system", "content": "You are a helpful assistant."},
                                                       ensure_ascii=False)
            await session_create_entity.create()

        # Re-read the chat history.
        chat_history = await session_get_list.lst()
        if len(chat_history) < self.history_num + 1:

            chat_msg = [json.loads(item["content"]) for item in chat_history]
            print("chat_msg:", chat_msg)
        else:

            first_element = chat_history[:1]

            last = -(self.history_num - 1)
            last_four_elements = chat_history[last:]

            # Keep the system message plus the most recent entries.
            data = first_element + last_four_elements
            chat_msg = [json.loads(item["content"]) for item in data]

        # Persist the incoming user message.
        session_create_entity.content = json.dumps(
            {"role": "user", "content": self.content.encode("utf-8").decode("utf-8")},
            ensure_ascii=False)
        await session_create_entity.create()

        # if self.collection is None or self.collection == "":
        #     chat_msg.append({"role": "user", "content": self.content})
        if True:
            # History-compression step (the LLM rewrite below is disabled).
            import json
            # Keep only the last two messages for the (disabled) rewrite.
            if len(chat_msg) >= 2:
                chat_msg_question_change = chat_msg[-2:]
            else:
                chat_msg_question_change =chat_msg

#             chat_msg_history = json.dumps(chat_msg_question_change, ensure_ascii=False)
#             # 将chat_msg_history 中 Tribunal 全部替换为  Court
#             chat_msg_history = chat_msg_history.replace("Tribunal", "Court")
#             chat_msg_history = chat_msg_history.replace("tribunal", "court")
#             chat_msg_history_prompt_zip = '''
# You are a document analysis expert. Please analyze the following chat history and determine the user's intent based on the context of the chat history and the user's question. Finally, rewrite the user's question.
#
# The rewritten question should be a single sentence that clearly expresses the user's intent without requiring any context, making it suitable for vector matching and intent recognition.
#                     Chat history: ''' + chat_msg_history + '''
#                    User's question: ''' + self.content + '''
#                Only return the rewritten question, do not return any other content.
#                  '''
#             prompt_chat_msg_history_prompt_zip = chat_gpt_azure(chat_msg_history_prompt_zip)
#             print("压缩后的用户提问内容:", prompt_chat_msg_history_prompt_zip)
#
#             chat_content = prompt_chat_msg_history_prompt_zip
#             print("压缩后的用户提问内容！！！！！！！！！！！！！！！:", prompt_chat_msg_history_prompt_zip)
            # Normalize Tribunal -> Court for matching against the corpus.
            chat_content =self.content.encode("utf-8").decode("utf-8").replace("Tribunal", "Court")
            chat_content =chat_content.replace("tribunal", "court")
            # Classify the question: metadata retrieval ("检索") vs Q&A ("问答").
            prompt_check = f'''
                    # 指令：
                    根据“用户问题”判断用户目的是“检索”还是“问答”

                    # 要求：
                    1. 只能输出“检索”或者“问答”
                    2. 无须输出分析内容

                    # 用户问题：{chat_content}

                    # 举例:
                    例子1:
                    用户问题:Is there any comments from the court about the route by which we have exercised our disciplinary powers?
                    输出:问答

                    例子2:
                    用户问题:Identify all SFAT rulings in 2022
                    输出:检索
                '''

            check = chat_gpt_azure(prompt_check)
            if "检索" in check:
                # Retrieval path: have the LLM build a Milvus filter expression.
                filter_prompt = ("现在给你一个prompt，请根据这个prompt拼接 一个filter。要求："
                                 "1.返回拼接后的prompt，如果没有拼接的内容，直接返回'无拼接内容'"
                                 "2.只返回拼接后内容 "
                                 "示例如下：prompt为:Identify all SFAT rulings in 2022 拼接的filter为: year == '2022' and cate =='Ruling'  "
                                 "示例如下：prompt为:Identify all SFAT rulings in 2021 拼接的filter为: year == '2021' and cate =='Ruling'  "
                                 "示例如下：prompt为:Identify all SFAT determination in 2021 拼接的filter为: year == '2021' and cate =='Determination'  "
                                 "prompt为:" + chat_content
                                 )
                filter_prompt_result = chat_gpt_azure(filter_prompt)
                if "无拼接内容" in filter_prompt_result:
                    sparse_results = search_metedata("sfat_metadata2")
                else:
                    # Filtered metadata query against the sfat_metadata2 collection.
                    sparse_results = search_metedata("sfat_metadata2", filter=filter_prompt_result)
                file_meta_data = ""
                for i, result in enumerate(sparse_results):
                    file_meta_data += json.dumps(result['entity']['metadata'], ensure_ascii=False)

                prompt_in = '''
Please answer my question based on the metadata content provided. The question is: ''' + chat_content + '''.
The metadata content is: ''' + file_meta_data + '''.
Note: When analyzing the problem, do not use the "fileName" in the metadata as a reference for determining "cate", "tags" or "year"! However, when returning the file name, use the data in "fileName".
Only answer the question.When outputting the question, also output the summary of the file, along with the year and month.
Default to answering in English. Answer in the same language as the question.
                      '''
                chat_msg.append({"role": "user", "content": prompt_in})
            elif " that caused the Court to issue a disqualification order " in self.content:
                # Hard-coded demo branch: answer from a bundled judgment text.
                # NOTE(review): relies on HCMP000868_2019.pdf.txt existing in
                # the process working directory — verify deployment layout.
                with open("HCMP000868_2019.pdf.txt", "r", encoding="utf-8") as file:
                    file_content = file.read()
                prompt_in = '''
                   Please answer my question based on the file content provided. The question is: ''' + self.content + '''。
                       The file content is: ''' + file_content + ''' . Default to answering in English. Answer in the same language as the question.
                      '''
                chat_msg.append({"role": "user", "content": prompt_in})
            else:
                # Knowledge-base Q&A path (mmt/sfat routing disabled below;
                # collection is forced to "sfat").
#                 prompt_mmt_check = f'''
#                         # 指令：
#                         根据“用户问题”判断用户需要检索的知识库的是“mmt”还是“sfat”
#
#                         # 要求：
#                         1. 只能输出“mmt”或者“sfat”
#                         2.更具用户的问题判断知识库选择
#                         3.判断用户问题是否出现mmt 或 者 sfat 判断。或则根据mmt和sfat业务意思判断
#                        注意 MMT 意思为 Market Misconduct Tribunal
#                        SFAT 意思为  Securities and Futures Appeals Tribunal
#                         #背景：
#                         香港SFC背景概述
# 香港证券及期货事务监察委员会（SFC）是负责监管证券及期货市场的权威机构，成立于1989年，旨在维护市场公平、透明和效率，并保护投资者权益‌。其监管框架主要基于《证券及期货条例》（SFO），该条例于2003年全面生效，为打击市场失当行为和设立专业审裁处提供了法律基础‌。SFC通过执法、调查和监管措施，确保市场秩序，同时支持审裁处制度的独立运作‌。
# 1.Market Misconduct Tribunal (MMT) 的解释
# Market Misconduct Tribunal（市场失当行为审裁处）是香港根据《证券及期货条例》设立的行政审裁机构，专门处理市场失当行为案件‌。该审裁处成立于2003年，取代了原有的内幕交易审裁处，其主席由香港高等法院法官或退休法官担任，成员包括商界专业人士，确保独立性和专业性‌。其主要功能包括：
# 管辖权范围‌：涵盖内幕交易、虚假交易、操控价格、操纵证券市场、披露受禁交易数据及虚假误导资料等六类市场失当行为‌。
# 程序和标准‌：适用民事程序规则，证明标准为“盖然性标准”（Balance of Probabilities），聚焦于判断行为是否发生、行为人身份及利润/损失数额，而非刑事处罚‌。
# 执法优势‌：作为独立于SFC的全职机构，MMT能高效处理案件（例如SFC可提交案件并由提控官代表），弥补了刑事程序的高门槛和延迟问题，提升了执法效率‌。这一机制被视为香港证券监管的创新，避免了传统行政或刑事路径的局限‌。
# 2.Securities and Futures Appeals Tribunal (SFAT) 的解释
# Securities and Futures Appeals Tribunal（证券及期货事务上诉审裁处）是香港SFC体系下的上诉机构，负责审理对SFC监管决定（如牌照吊销、罚款或纪律处分）提出的申诉‌。尽管搜索结果未提供其详细背景，但基于香港法律框架，SFAT确保监管决策的公正性和可申诉性：
# 角色和功能‌：作为二级审裁机制，SFAT审查SFC的初步裁决，提供独立复核渠道，防止监管权力滥用。这有助于平衡市场效率和投资者保护，维护市场公信力‌。
# 与MMT的关系‌：不同于MMT的民事执法焦点，SFAT专注于上诉案件，两者互补构成SFC监管的“双重保障”：MMT处理行为违规的直接制裁，而SFAT处理监管争议的复审，强化整体制度韧性‌。然而，SFAT的具体运作细节（如成立时间和案例）需进一步核实。
# 总结
#
# 香港SFC通过MMT和SFAT构建了多层次的监管体系：MMT专注于市场失当行为的民事制裁，提升执法效率；SFAT则确保监管决定的公平上诉机制，共同维护市场稳定‌。这种分工体现了香港在金融监管中平衡效率与公正的创新策略，值得借鉴‌。
#                         # 用户问题：{chat_content}
#
#                         # 举例:
#                         例子1:
#                         用户问题:Is there any comments from the court about the route by which we have exercised our disciplinary powers?
#                         输出:sfat
#
#                         例子2:
#                         用户问题:Please help me to summarise the MMT proceedings against Mayer Holdings Limited and its senior management over late disclosure of inside information.
#                         输出:mmt
#
#                         例子3:
#                         用户问题:What additional factors might the Court consider when determining theappropriateness of the SFC relying on both limbs under section 194(1) of theSFO?
#                         输出:sfat
#
#                         例子4:
#                         用户问题:  Is there any comments from the Tribunal about the route by which we have exercised our disciplinary powers?
#                         输出:sfat
#                     '''
#
#                 check_mmt = chat_gpt_azure(prompt_mmt_check)
#                 if "mmt" in check_mmt:
#                     self.collection = "mmt"
#                 else:
#                     self.collection = "sfat"
                self.collection = "sfat"
                results = self.rag_search_milvus(searchName=chat_content, filter=None,
                                                 memory_name=self.collection, limit=10)
                re_str = ""
                i = 1
                for res in results:
                    res_after = "Knowledge Point: " + "File Name:" + res.fileName + "Content:" + res.text + "\n" + ";"
                    re_str = re_str + res_after
                    # fileNameList = fileNameList+str(i)+"." +"文件名"+ res.fileName +"内容："+res.text +"\n"
                    i = 1 + i

                for i, res in enumerate(results, start=1):
                    # result_entry2 carries a download link built from the hit's
                    # file name; presumably the host is the demo server — verify.
                    result_entry2 = {
                        # "index": i,
                        "fileName": "**http://20.205.161.18/sfc/download/"+res.fileName.replace(".txt", "")+"**",
                        # "content": res.text
                    }
                    result_entry = {
                        "index": i,
                        "fileName": res.fileName,
                        "content": res.text
                    }
                    result_list2.append(result_entry2)
                    result_list.append(result_entry)
                # De-duplicate result_list2 by fileName (dict keeps the last hit).
                if len(result_list2) > 0:
                    result_list2 = list({v['fileName']: v for v in result_list2}.values())
                prompt_in = '''
                You are a document analysis expert. Your task is to find the answer to the user's question based on the provided document knowledge points.
                Document Knowledge Points: +''' + re_str + '''
                User Question: ''' + chat_content_base + '''
                Requirements:
                1. Only answer based on the knowledge base without making any modifications or assumptions. Ensure the response is accurate and rigorous.
                2. Do not omit any details from the knowledge points.
                3. Do not repeat the question; directly provide the answer.
                4. Respond in English.
                5. If no relevant knowledge is found, respond with "The documents do not describe the relevant knowledge. Please contact the administrator to add to the knowledge base."
                6. If an answer is found, only use the statements from the documents to answer the question.
                7. Provide citations for all explanations, e.g. §1, §§2. Where § = section or reference.And add the fileName as well.
                8. Do not omit § in content and list them out.
                '''
                chat_msg.append({"role": "user", "content": prompt_in})

        # NOTE(review): no else branch below — an unrecognized self.model
        # leaves ``content`` undefined and raises NameError at the print.
        if self.model == "qwen":
            # Call the model (non-streaming) and store the reply.
            client = OpenAI(
                # If no env var is configured, replace with api_key="sk-xxx".
                api_key=get_model_config(model_name="qwen").model_api_key,
                # How to get an API key: https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key
                base_url=get_model_config(model_name="qwen").model_url,
            )
            if self.history_status:
                chat_msg = chat_msg
            else:
                # Without history, keep only the last message.
                chat_msg = chat_msg[-1:]
            completion = client.chat.completions.create(
                model="qwen-plus",
                # Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
                messages=chat_msg,
                stream=False
            )
            content = str(completion.choices[0].message.content)
        elif self.model == "deepseek":
            print("！！！！！！！！！！！！！！！！！！！！！！！使用deepseek模型！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！")
            # Call the model (non-streaming) and store the reply.
            client = OpenAI(  # If no env var is configured, replace with api_key="sk-xxx".
                api_key=get_model_config(model_name="deepseek").model_api_key,
                # How to get an API key: https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key
                base_url=get_model_config(model_name="deepseek").model_url, )
            if self.history_status:
                chat_msg = chat_msg
            else:
                # Without history, keep only the last message.
                chat_msg = chat_msg[-1:]
            completion = client.chat.completions.create(
                model="deepseek-chat",
                # Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
                messages=chat_msg,
                stream=False
            )
            content = str(completion.choices[0].message.content)
        elif self.model == "gpt":
            print("！！！！！！！！！！！！！！！！！！！！！！！使用gpt模型！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！")
            if self.history_status:
                chat_msg = chat_msg
            else:
                # Without history, keep only the last message.
                chat_msg = chat_msg[-1:]
            content = chat_gpt_azure_msg(chat_msg)
        print("答案reason！！！！！！！！！！！！！！！:", content)
        # Persist the assistant answer.
        session_create_entity.content = json.dumps(
            {"role": "assistant", "content": content.encode('utf-8').decode('utf-8')},
            ensure_ascii=False)
        # Serialize result_list for the relevancy field.
        if content.encode('utf-8').decode('utf-8') == "文档中未描述相关知识请联系管理员添加知识库":
            session_create_entity.content_relevancy = ""
        else:
            result_list_str = json.dumps(result_list, ensure_ascii=False)
            # session_create_entity.content_relevancy = result_list_str
            # NOTE(review): relevancy is deliberately blanked for the POC even
            # though result_list_str was just built — confirm this is intended.
            session_create_entity.content_relevancy = ''
        session_msg_id = await session_create_entity.create()
        # Strip ".txt" suffixes from file names echoed in the answer.
        content = content.replace(".txt", "")
        return {"reason": content, "from": result_list2}


    async def chat_rag_poc_rag_only(self):
        chat_content_base = self.content.encode("utf-8").decode("utf-8")
        result_list = []
        result_list2 = []
        import json
        session_create_entity = SessionMsgCreateBiz(session_id=self.session_id, content="")
        session_create_entity.session_id = self.session_id
        session_get_list = SessionMsgListBiz()
        session_get_list.session_id = self.session_id
        # 查询聊天记录的key
        historyList = await session_get_list.lst()
        # historyList 为空或则为none
        if historyList is None or len(historyList) == 0:
            session_create_entity.content = json.dumps({"role": "system", "content": "You are a helpful assistant."},
                                                       ensure_ascii=False)
            await session_create_entity.create()

        # 获取聊天记录
        chat_history = await session_get_list.lst()
        if len(chat_history) < self.history_num + 1:

            chat_msg = [json.loads(item["content"]) for item in chat_history]
            print("chat_msg:", chat_msg)
        else:

            first_element = chat_history[:1]

            last = -(self.history_num - 1)
            last_four_elements = chat_history[last:]

            # 拼接成新列表
            data = first_element + last_four_elements
            chat_msg = [json.loads(item["content"]) for item in data]

        # 储存聊天记录
        session_create_entity.content = json.dumps(
            {"role": "user", "content": self.content.encode("utf-8").decode("utf-8")},
            ensure_ascii=False)
        await session_create_entity.create()

        # if self.collection is None or self.collection == "":
        #     chat_msg.append({"role": "user", "content": self.content})
        if True:
            # 压缩聊天记录
            import json
            # 获取chat_msg的最后二个元素重新构成列表
            if len(chat_msg) >= 2:
                chat_msg_question_change = chat_msg[-2:]
            else:
                chat_msg_question_change =chat_msg

#             chat_msg_history = json.dumps(chat_msg_question_change, ensure_ascii=False)
# --- Disabled experiment: rewrite/compress the user question from chat history
# --- via chat_gpt_azure before vector search. Kept for reference.
#             # 将chat_msg_history 中 Tribunal 全部替换为  Court
#             chat_msg_history = chat_msg_history.replace("Tribunal", "Court")
#             chat_msg_history = chat_msg_history.replace("tribunal", "court")
#             chat_msg_history_prompt_zip = '''
# You are a document analysis expert. Please analyze the following chat history and determine the user's intent based on the context of the chat history and the user's question. Finally, rewrite the user's question.
#
# The rewritten question should be a single sentence that clearly expresses the user's intent without requiring any context, making it suitable for vector matching and intent recognition.
#                     Chat history: ''' + chat_msg_history + '''
#                    User's question: ''' + self.content + '''
#                Only return the rewritten question, do not return any other content.
#                  '''
#             prompt_chat_msg_history_prompt_zip = chat_gpt_azure(chat_msg_history_prompt_zip)
#             print("压缩后的用户提问内容:", prompt_chat_msg_history_prompt_zip)
#
#             chat_content = prompt_chat_msg_history_prompt_zip
#             print("压缩后的用户提问内容！！！！！！！！！！！！！！！:", prompt_chat_msg_history_prompt_zip)
            # Normalize terminology in the question: "Tribunal"/"tribunal" -> "Court"/"court".
            # NOTE(review): .encode("utf-8").decode("utf-8") is a no-op round-trip on str —
            # presumably a leftover; confirm it can be removed.
            chat_content =self.content.encode("utf-8").decode("utf-8").replace("Tribunal", "Court")
            chat_content =chat_content.replace("tribunal", "court")

        # Vector search against the selected Milvus collection; keep the top 10 hits.
        results = self.rag_search_milvus(searchName=chat_content, filter=None,
                                                 memory_name=self.collection, limit=10)
        # Concatenate every hit into one "knowledge points" string for the prompt.
        re_str = ""
        i = 1
        for res in results:
                    res_after = "Knowledge Point: " + "File Name:" + res.fileName + "Content:" + res.text + "\n" + ";"
                    re_str = re_str + res_after
                    # fileNameList = fileNameList+str(i)+"." +"文件名"+ res.fileName +"内容："+res.text +"\n"
                    # NOTE(review): counter `i` is incremented here but never used.
                    i = 1 + i

        # Build two parallel result lists:
        #   result_entry2 -> download-link-only entries returned to the caller ("from"),
        #   result_entry  -> full entries (index/fileName/content) for persistence.
        # Assumes result_list and result_list2 were initialized earlier in this
        # method, outside this view — TODO confirm.
        for i, res in enumerate(results, start=1):
                    result_entry2 = {
                        # "index": i,
                        "fileName": "**http://20.205.161.18/sfc/download/"+res.fileName.replace(".txt", "")+"**",
                        # "content": res.text
                    }
                    result_entry = {
                        "index": i,
                        "fileName": res.fileName,
                        "content": res.text
                    }
                    result_list2.append(result_entry2)
                    result_list.append(result_entry)
                # Deduplicate result_list2 by fileName (last occurrence wins).
        if len(result_list2) > 0:
                result_list2 = list({v['fileName']: v for v in result_list2}.values())
                # RAG prompt: answer strictly from the retrieved knowledge points.
                # `chat_content_base` (the original question) and `chat_msg` (the
                # running message list) are bound earlier in this method, outside
                # this view — TODO confirm.
                prompt_in = '''
                You are a document analysis expert. Your task is to find the answer to the user's question based on the provided document knowledge points.
                Document Knowledge Points: +''' + re_str + '''
                User Question: ''' + chat_content_base + '''
                Requirements:
                1. Only answer based on the knowledge base without making any modifications or assumptions. Ensure the response is accurate and rigorous.
                2. Do not omit any details from the knowledge points.
                3. Do not repeat the question; directly provide the answer.
                4. Respond in English.
                5. If no relevant knowledge is found, respond with "The documents do not describe the relevant knowledge. Please contact the administrator to add to the knowledge base."
                6. If an answer is found, only use the statements from the documents to answer the question.
                7. Provide citations for all explanations, e.g. §1, §§2. Where § = section or reference.And add the fileName as well.
                8. Do not omit § in content and list them out.
                '''
                chat_msg.append({"role": "user", "content": prompt_in})

        # Dispatch to the configured LLM backend and collect the answer text.
        if self.model == "qwen":
            # Call the LLM and store the chat history.
            client = OpenAI(
                # If no environment variable is configured, replace the next line with a Bailian API key: api_key="sk-xxx",
                api_key=get_model_config(model_name="qwen").model_api_key,
                # How to obtain an API key: https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key
                base_url=get_model_config(model_name="qwen").model_url,
            )
            if self.history_status:
                chat_msg = chat_msg
            else:
                # History disabled: keep only the last (current) message.
                chat_msg = chat_msg[-1:]
            completion = client.chat.completions.create(
                model="qwen-plus",
                # Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
                messages=chat_msg,
                stream=False
            )
            content = str(completion.choices[0].message.content)
        elif self.model == "deepseek":
            print("！！！！！！！！！！！！！！！！！！！！！！！使用deepseek模型！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！")
            # Call the LLM and store the chat history.
            client = OpenAI(  # If no environment variable is configured, replace the next line with a Bailian API key: api_key="sk-xxx",
                api_key=get_model_config(model_name="deepseek").model_api_key,
                # How to obtain an API key: https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key
                base_url=get_model_config(model_name="deepseek").model_url, )
            if self.history_status:
                chat_msg = chat_msg
            else:
                # History disabled: keep only the last (current) message.
                chat_msg = chat_msg[-1:]
            completion = client.chat.completions.create(
                model="deepseek-chat",
                # Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
                messages=chat_msg,
                stream=False
            )
            content = str(completion.choices[0].message.content)
        elif self.model == "gpt":
            print("！！！！！！！！！！！！！！！！！！！！！！！使用gpt模型！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！")
            if self.history_status:
                chat_msg = chat_msg
            else:
                # History disabled: keep only the last (current) message.
                chat_msg = chat_msg[-1:]
            content = chat_gpt_azure_msg(chat_msg)
        print("答案reason！！！！！！！！！！！！！！！:", content)
        # Persist the assistant reply on the session record.
        session_create_entity.content = json.dumps(
            {"role": "assistant", "content": content.encode('utf-8').decode('utf-8')},
            ensure_ascii=False)
        # Convert result_list to a JSON string for storage.
        # NOTE(review): the prompt mandates an English "not found" sentence, so this
        # comparison against the Chinese literal probably never matches — confirm intent.
        if content.encode('utf-8').decode('utf-8') == "文档中未描述相关知识请联系管理员添加知识库":
            session_create_entity.content_relevancy = ""
        else:
            # NOTE(review): result_list_str is computed but never stored; relevancy is
            # deliberately(?) persisted as empty — confirm.
            result_list_str = json.dumps(result_list, ensure_ascii=False)
            # session_create_entity.content_relevancy = result_list_str
            session_create_entity.content_relevancy = ''
        session_msg_id = await session_create_entity.create()
        # Strip ".txt" suffixes from the answer text before returning it.
        content = content.replace(".txt", "")
        return {"reason": content, "from": result_list2}