# api_5102.py
# Create by GF 2025-05-10 23:51

# Python 3 Standard Libraries.
import io
import logging
import os
import random
# ..................................................
import flask
# ..................................................
# Get File Manager Information Required Libraries.
import requests
# ..................................................
# Document Comparison Required Libraries.
import docx
import fitz
import pandas
from paddleocr import PaddleOCR
# ..................................................
import proxy_openai_api_202505

# ##################################################

WWW_HTML_BASE_URL              = "http://172.16.160.144"  # base URL of the host serving filemanager.php and uploads
WWW_HTML_UPLOADS_DIR           = "/www/html/uploads"      # local filesystem directory backing the upload page
PADDLEOCR_EXTRACTS_TIMES_LIMIT = 30  # Cap on pages OCR'ed per task: too many PaddleOCR extractions in a single task can exhaust memory and crash the process.
SIMILAR_TO_OPENAI_API_BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"  # upstream OpenAI-compatible provider
                               # "https://spark-api-open.xf-yun.com/v1"
                               # "https://dashscope.aliyuncs.com/compatible-mode/v1"

# ##################################################

# Hidden conversation preamble injected before every forwarded chat request.
# The "MIDDLEWARE" user turns are authored by this proxy (invisible to the real
# user) and teach the model about the file-upload page; the assistant turns
# pre-commit the model to honoring that middleware context.
LLM_MESSAGE_LIST_DOCUMENT_COMPARISON = [

    {"role": "user",      "content": "MIDDLEWARE\n你好, 我是处理文件的中间件, 我以用户消息的方式和你共享信息, 用户看不到我的消息, 只有你能看到"},
    {"role": "assistant", "content": "好的, 我将采纳你提供的消息, 为用户提供更加有效的响应"},
    {"role": "user",      "content": "MIDDLEWARE\n如果你不能处理文件, 我将帮助你处理文件, 并将处理结果响应给你"},
    {"role": "assistant", "content": "好的, 我将以你的处理结果为准, 将文件处理结果响应给用户"},
    {"role": "user",      "content": "MIDDLEWARE\n用户可以通过 [文件上传入口](%s/filemanager.php) 上传文件, 访问该页面并点击右上角的 Upload, 单个文件大小限制为 50MB" % WWW_HTML_BASE_URL},
    {"role": "assistant", "content": "好的, 当用户有文件处理需求时, 我将返回 [文件上传入口](%s/filemanager.php) 让用户访问该链接, 并点击右上角的 Upload 上传文件, 同时告知用户单个文件大小限制为 50MB" % WWW_HTML_BASE_URL}
]

# ##################################################

def Text_Opr_Content_in_Mark_Extract(Text:str, Mark_Start:str, Mark_End:str, With_Mark:int=1) -> str:

    # Text Operation - Extract The Content in Mark
    #
    # Extract the first substring of `Text` delimited by `Mark_Start` and
    # `Mark_End`. With `With_Mark=1` (default) the markers are included in the
    # result; with `With_Mark=0` only the inner content is returned. If either
    # marker is missing, an empty string is returned.
    #
    # Fix vs. the original hand-rolled scanner: when `Mark_End` was absent and
    # `With_Mark=0`, the end index underflowed to a negative slice bound and a
    # garbage (non-empty) substring was returned; now '' is returned for every
    # missing-marker case. `str.find` also replaces the O(n*m) manual loops.
    #
    # >>> Text = "Here is The Code:\n\n<CODE_START>\nDESCRIBE example_table;\n<CODE_END>"
    # >>> Text_Opr_Content_in_Mark_Extract(Text, "<CODE_START>\n", "<CODE_END>", 1)
    # '<CODE_START>\nDESCRIBE example_table;\n<CODE_END>'
    # >>> Text_Opr_Content_in_Mark_Extract(Text, "<CODE_START>\n", "<CODE_END>", 0)
    # 'DESCRIBE example_table;\n'

    start_idx = Text.find(Mark_Start)
    # ..............................................
    if (start_idx == -1):  # "Mark Start" not found -> empty string.
        return ''
    # ..............................................
    # Search for the end marker only after the start marker.
    end_idx = Text.find(Mark_End, start_idx + len(Mark_Start))
    if (end_idx == -1):    # "Mark End" not found -> empty string.
        return ''
    # ..............................................
    if (With_Mark == 0):
        return Text[(start_idx + len(Mark_Start)):end_idx]
    # ..............................................
    return Text[start_idx:(end_idx + len(Mark_End))]

# ##################################################

class Document_Comparison(object):

    # Fuzzy document-comparison engine for .txt / .docx / .pdf files.
    #
    # Pipeline: split both documents into paragraphs and sentences, match every
    # unit of document B against the most similar unit of document A with a
    # character-subsequence score, then report per-row and overall similarity.

    def Fuzzy_Match_String_Preprocess(self, ArgX:str) -> str:

        # Normalize a string for fuzzy matching:
        # - full-width ASCII letters/digits -> half-width equivalents
        #   (str.maketrans builds the per-character mapping table and
        #   str.translate applies it in a single C-level pass),
        # - lower-case fold,
        # - remove ASCII spaces.
        #
        # Example:
        # >>> Fuzzy_Match_String_Preprocess("Ｈｅｌｌｏ！１２３全角ＴＥＳＴ")
        # 'hello！123全角test'

        Mapping_Table = str.maketrans(
            "ＡＢＣＤＥＦＧＨＩＪＫＬＭＮＯＰＱＲＳＴＵＶＷＸＹＺａｂｃｄｅｆｇｈｉｊｋｌｍｎｏｐｑｒｓｔｕｖｗｘｙｚ０１２３４５６７８９",
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
        )
        # ..........................................
        Normalized = str(ArgX).translate(Mapping_Table)
        # ..........................................
        return Normalized.lower().replace(' ', '')

    def Fuzzy_Match_by_Char_Num_Score(self, Text:str, Keys:str) -> float:

        # Fuzzy Match - character-count scoring.
        #
        # Args:
        # - Text: the original text to match against
        # - Keys: the search key string
        # Returns:
        # - float: match score in [0.0, 1.0]; closer to 1 means a better match.
        #
        # Method: walk Keys character by character and count how many can be
        # found in Text *in order* (an ordered-subsequence match); characters
        # that cannot be found are skipped. The score is the matched count m
        # divided by n = max(len(Text), len(Keys)) after normalization, so both
        # missing and surplus text lower the score.

        Processed_Text = self.Fuzzy_Match_String_Preprocess(Text)
        Processed_Keys = self.Fuzzy_Match_String_Preprocess(Keys)

        # ..........................................

        # An empty key matches everything.
        if not Processed_Keys:
            return 1.0

        # ..........................................

        matched = 0      # number of key characters found in order
        search_from = 0  # next search position in Processed_Text
        for ch in Processed_Keys:
            hit = Processed_Text.find(ch, search_from)
            if hit != -1:
                matched += 1
                search_from = hit + 1  # keep the search strictly ordered

        # Normalize by the longer of the two strings.
        # (The original dead "k == 0" branch was unreachable after the empty
        # check above and has been removed.)
        n = max(len(Processed_Text), len(Processed_Keys))

        # ..........................................

        return round(matched / n, 4)

    # ##############################################

    def String_Split_into_List_by_Full_Stop(self, String_Content:str):

        # String Split into List by Full Stop.
        #
        # Chinese full stops (。) anywhere and a single *trailing* ASCII period
        # are treated as sentence terminators; ASCII periods in the middle of
        # the string are intentionally kept (file names, decimals, ...).

        String = str(String_Content)
        # ..........................................
        String = String.replace("。", '\n')  # Chinese full stop -> line break
        # ..........................................
        if String.endswith('.'):             # trailing ASCII period -> line break
            String = String[:-1] + '\n'
        # ..........................................
        return String.split('\n')            # split the text on line breaks

    def Text_Split_into_Pandas_Dataframe_by_Line_Break(self, Text_File_Path:str):

        # Read a UTF-8 .txt file and return a DataFrame with one paragraph per
        # row. Columns: "document" (base file name) and "paragraph".

        # 'with' guarantees the file handle is closed even on read errors.
        with open(Text_File_Path, mode='r', encoding="utf-8") as File_Obj:
            Text_Str = File_Obj.read()

        Text_Str = Text_Str.replace("\r\n", '\n')   # normalize Windows line endings
        # ..........................................
        Text_Paragraph_List = Text_Str.split('\n')  # one paragraph per line

        # Combine as Pandas DataFrame.
        # ..........................................
        PandasDF = pandas.DataFrame({"paragraph": Text_Paragraph_List})
        # ..........................................
        PandasDF["document"] = os.path.basename(Text_File_Path)

        return PandasDF[["document", "paragraph"]]

    def Docx_Split_into_Pandas_Dataframe_by_Line_Break(self, Docx_File_Path:str):

        # Read a .docx file (python-docx >= 1.1.2) and return a DataFrame with
        # one paragraph per row. Columns: "document" and "paragraph".
        # Empty paragraphs are kept here; the caller filters them out.

        doc = docx.Document(Docx_File_Path)
        # ..........................................
        Docx_Paragraph_List = [Paragraph.text for Paragraph in doc.paragraphs]

        # Combine as Pandas DataFrame.
        # ..........................................
        PandasDF = pandas.DataFrame({"paragraph": Docx_Paragraph_List})
        # ..........................................
        PandasDF["document"] = os.path.basename(Docx_File_Path)

        return PandasDF[["document", "paragraph"]]

    def PDF_Split_into_Pandas_Dataframe_by_Line_Break(self, PDF_File_Path:str):

        # Extract text from a PDF: the embedded text of every page, plus OCR
        # text (PaddleOCR) from the images of a bounded random sample of pages.
        # The OCR page count is capped by PADDLEOCR_EXTRACTS_TIMES_LIMIT to
        # keep memory usage bounded.

        ocr_model = PaddleOCR(use_angle_cls=True, lang="ch", use_gpu=False)

        pdf_file = fitz.open(PDF_File_Path)
        page_num = len(pdf_file)
        # ..........................................
        pdf_ctnt_list = []
        # Bug fix: random.sample() raises ValueError when the sample size
        # exceeds the population, so cap it at the actual page count.
        # A set makes the per-page membership test O(1).
        ocr_pages = set(random.sample(range(page_num), min(PADDLEOCR_EXTRACTS_TIMES_LIMIT, page_num)))
        # ..........................................
        for page_idx in range(page_num):

            page = pdf_file[page_idx]
            pdf_ctnt_list.append(page.get_text())

            if page_idx in ocr_pages:  # OCR only the randomly selected pages
                for img in page.get_images(full=True):

                    # img is a PyMuPDF image record; img[0] is the xref used
                    # to pull the raw image bytes out of the PDF.
                    base_img = pdf_file.extract_image(img[0])
                    byts_img = base_img["image"]

                    ocr_text_list = ocr_model.ocr(byts_img, cls=True)
                    if ocr_text_list[0] is not None:  # PaddleOCR returns [None] when nothing is recognized
                        pdf_ctnt_list.extend(t[1][0] for t in ocr_text_list[0])
        # ..........................................
        pdf_ctnt_list = [line.replace('\n', '') for line in pdf_ctnt_list]

        # Combine as Pandas DataFrame.
        # ..........................................
        PandasDF = pandas.DataFrame({"paragraph": pdf_ctnt_list})
        # ..........................................
        PandasDF["document"] = os.path.basename(PDF_File_Path)

        return PandasDF[["document", "paragraph"]]

    def Document_Split_into_Pandas_Dataframe_by_Line_Break(self, Document_File_Path:str):

        # Dispatch on file extension (.txt / .docx / .pdf) to the matching
        # splitter. Extension matching is case-insensitive.
        #
        # Raises:
        # - ValueError: for an unsupported extension (the original fell through
        #   with an unbound local and raised a confusing UnboundLocalError).

        lowered_path = Document_File_Path.lower()
        # ..........................................
        if lowered_path.endswith(".txt"):
            return self.Text_Split_into_Pandas_Dataframe_by_Line_Break(Document_File_Path)
        # ..........................................
        if lowered_path.endswith(".docx"):
            return self.Docx_Split_into_Pandas_Dataframe_by_Line_Break(Document_File_Path)
        # ..........................................
        if lowered_path.endswith(".pdf"):
            return self.PDF_Split_into_Pandas_Dataframe_by_Line_Break(Document_File_Path)
        # ..........................................
        raise ValueError("unsupported document type: %s" % Document_File_Path)

    # ##############################################

    def _Best_Match_Merge(self, Pandas_DataFrame_1, Pandas_DataFrame_2, Column:str):

        # Shared implementation for the paragraph- and sentence-level
        # comparisons (they were duplicated line for line).
        #
        # For every row of DataFrame 2, find the most similar `Column` value in
        # DataFrame 1 and attach three new columns:
        # - "document_1":       source document of the best match,
        # - f"{Column}_1":      the best-matching text itself,
        # - f"{Column}_sim":    its similarity score.

        # Reset the reference index once (hoisted out of the per-row search);
        # a unique index guarantees idxmax() returns a scalar label.
        Reference_DF = Pandas_DataFrame_1.reset_index(drop=True)
        # ..........................................
        Result_DF = Pandas_DataFrame_2.copy()
        Result_DF["document"] = Result_DF["document"].astype("string")
        Result_DF[Column]     = Result_DF[Column].astype("string")
        # ..........................................
        Result_DF = Result_DF[Result_DF[Column] != '']
        # ..........................................
        def best_match(value):
            scores   = Reference_DF[Column].apply(lambda x: self.Fuzzy_Match_by_Char_Num_Score(x, value))
            best_idx = scores.idxmax()
            return (Reference_DF.loc[best_idx, "document"], Reference_DF.loc[best_idx, Column], scores.loc[best_idx])
        # ..........................................
        matches = Result_DF[Column].apply(best_match)
        Result_DF["document_1"   ] = matches.apply(lambda t: t[0])
        Result_DF[Column + "_1"  ] = matches.apply(lambda t: t[1])
        Result_DF[Column + "_sim"] = matches.apply(lambda t: t[2])
        # ..........................................
        return Result_DF

    def Paragraph_of_Pandas_Document_Comparison(self, Pandas_DataFrame_1, Pandas_DataFrame_2):

        # Paragraph-level comparison: adds "document_1", "paragraph_1" and
        # "paragraph_sim" columns to a copy of Pandas_DataFrame_2.
        return self._Best_Match_Merge(Pandas_DataFrame_1, Pandas_DataFrame_2, "paragraph")

    def Sentence_of_Pandas_Document_Comparison(self, Pandas_DataFrame_1, Pandas_DataFrame_2):

        # Sentence-level comparison: adds "document_1", "sentence_1" and
        # "sentence_sim" columns to a copy of Pandas_DataFrame_2.
        return self._Best_Match_Merge(Pandas_DataFrame_1, Pandas_DataFrame_2, "sentence")

    # ----------------------------------------------

    def Workflow(self, Document_File_Path_1:str, Document_File_Path_2:str) -> tuple:

        # Full comparison pipeline (the original annotated "-> str" but has
        # always returned a 2-tuple; the annotation is corrected here):
        #
        # [1] Split by Line Break
        # [2] Filtering Datas
        # [3] Washing Datas
        # [4] Split Paragraphs into Sentences
        # [5] Paragraph Comparison
        # [6] Sentence Comparison
        # [7] Calculate Comprehensive Similarity
        # [8] Save The Results
        #
        # Returns:
        # - (JSON_Data, Overall_Avg_Sim): JSON records of up to 30 randomly
        #   sampled compared rows, and the mean similarity over ALL rows.

        print("[Workflow] Working in Document Comparison -> Split by Line Break")
        Document_DF_1 = self.Document_Split_into_Pandas_Dataframe_by_Line_Break(Document_File_Path_1)
        Document_DF_2 = self.Document_Split_into_Pandas_Dataframe_by_Line_Break(Document_File_Path_2)
        # ..........................................
        print("[Workflow] Working in Document Comparison -> Filtering Datas")
        Document_DF_1 = Document_DF_1[Document_DF_1["paragraph"] != '']
        Document_DF_2 = Document_DF_2[Document_DF_2["paragraph"] != '']
        # ..........................................
        print("[Workflow] Working in Document Comparison -> Washing Datas")
        Document_DF_1["paragraph"] = Document_DF_1["paragraph"].apply(lambda x: str(x).strip())
        Document_DF_2["paragraph"] = Document_DF_2["paragraph"].apply(lambda x: str(x).strip())
        # ..........................................
        print("[Workflow] Working in Document Comparison -> Split Paragraphs into Sentences")
        Document_DF_1["sentence" ] = Document_DF_1["paragraph"].apply(self.String_Split_into_List_by_Full_Stop)
        Document_DF_1 = Document_DF_1.explode("sentence")
        Document_DF_2["sentence" ] = Document_DF_2["paragraph"].apply(self.String_Split_into_List_by_Full_Stop)
        Document_DF_2 = Document_DF_2.explode("sentence")
        # ..........................................
        print("[Workflow] Working in Document Comparison -> Paragraph Comparison")
        Result_DF = self.Paragraph_of_Pandas_Document_Comparison(Document_DF_1, Document_DF_2)
        # ..........................................
        print("[Workflow] Working in Document Comparison -> Sentence Comparison")
        Result_DF = self.Sentence_of_Pandas_Document_Comparison(Document_DF_1, Result_DF)
        # ..........................................
        print("[Workflow] Working in Document Comparison -> Calculate Comprehensive Similarity")
        Result_DF["average_sim"] = (Result_DF["paragraph_sim"] + Result_DF["sentence_sim"]) / 2
        # ..........................................
        Overall_Avg_Sim = Result_DF["average_sim"].mean()
        Result_DF = Result_DF.rename(columns={
            "document":      "文档 A",
            "paragraph":     "文档 A 段落",
            "sentence":      "文档 A 句子",
            "document_1":    "文档 B",
            "paragraph_1":   "文档 B 段落",
            "paragraph_sim": "段落相似度",
            "sentence_1":    "文档 B 句子",
            "sentence_sim":  "句子相似度",
            "average_sim":   "平均相似度"
        })
        # Bug fix: DataFrame.sample(30) raises ValueError on fewer than 30
        # rows, so cap the sample size at the available row count.
        Result_DF = Result_DF.sort_values("平均相似度", ascending=False).sample(min(30, len(Result_DF)))
        JSON_Data = Result_DF.to_json(index=False, orient="records", force_ascii=False)
        # ..........................................
        return (JSON_Data, Overall_Avg_Sim)

# ##################################################

app = flask.Flask(__name__)  # WSGI application serving the OpenAI-compatible proxy endpoints below

# ##################################################

@app.route("/v1", methods=["GET"])
def api_proxy_openai_v1():

    # Root of the API surface: answer probes with an empty 200 body.
    empty_body, status_ok = '', 200
    return empty_body, status_ok

@app.route("/v1/favicon.ico", methods=["GET"])
def api_proxy_openai_v1_favicon_ico():

    # Browsers request /favicon.ico automatically; reply 200 with no content
    # instead of filling the logs with 404s.
    return ('', 200)

@app.route("/v1/models", methods=["GET"])
def api_proxy_openai_v1_models():

    # Proxy the upstream /models listing, then append the locally implemented
    # document-comparison pseudo model so clients can select it.

    auth_header = flask.request.headers.get("Authorization", '')
    if (auth_header == ''):
        return flask.jsonify({"error": "missing api key"}), 401

    # Build the upstream request; the proxy object's url/headers accessors are
    # fluent (each .update() returns the container, allowing chaining).
    upstream = proxy_openai_api_202505.proxy_openai_api()
    upstream.url.update(f"{SIMILAR_TO_OPENAI_API_BASE_URL}/models")
    upstream.headers.update({"Authorization": auth_header}).update({"Content-Type": "application/json"})

    model_listing = upstream.get()

    try:
        # Advertise the custom model id on top of the upstream list.
        model_listing["data"].append({"id":"assistant-doccmp", "object":"model", "created":1744009868, "owned_by":"system"})
    except Exception as e:
        # Best effort: if the upstream payload has no "data" list, return it untouched.
        print("[Debug] error:", str(e))

    return flask.jsonify(model_listing), 200

@app.route("/v1/chat/completions", methods=["POST"])
def api_proxy_openai_v1_chat_completions():

    # Proxy an OpenAI-compatible chat completion to the upstream provider.
    #
    # Before forwarding, hidden "MIDDLEWARE" user/assistant turns are prepended
    # to the conversation: they advertise the file-upload page, list the files
    # currently uploaded, and describe the document-comparison service. When
    # the last user message names two files inside [] together with a compare
    # keyword, the comparison workflow runs locally and its result is injected
    # as extra context for the model.
    #
    # Example:
    # - curl -H "Content-Type: application/json" \
    # -      -H "Authorization: Bearer please-enter-openai-api-key" \
    # -      -X POST \
    # -      -d '{"messages": [{"role": "user", "content": "Can you here me?"}]}' \
    # -      http://127.0.0.1:5001/v1/chat/completions
    # Returns:
    # - {'code': 0,
    # -  'message': 'Success',
    # -  'sid': 'cha000bc275@dx196bad449ab9a4b532',
    # -  'choices': [{'message': {'role': 'assistant', 'content': 'Can I help you?'},
    # -               'index': 0}],
    # -  'usage': {'prompt_tokens': 5, 'completion_tokens': 5, 'total_tokens': 10}}

    received_api_key = flask.request.headers.get("Authorization", '')
    # ..............................................
    if (received_api_key == ''):
        return flask.jsonify({"error": "missing api key"}), 401
    # ..............................................
    received_model    = flask.request.json.get("model",    '')
    received_messages = flask.request.json.get("messages", [])
    received_stream   = flask.request.json.get("stream",   False)
    # ..............................................
    # Ignore "Authorization" / "model" from the JSON payload (force a specific model).
#   received_api_key = "Bearer NXuRvfuyBjfsOHqtAyRI:NXuRvfuyBjfsOHqtAyRI"
#   received_model   = "4.0Ultra"
    # ..............................................
    try:
        this_proxy_openai_api = proxy_openai_api_202505.proxy_openai_api()
        this_proxy_openai_api.url.update(f"{SIMILAR_TO_OPENAI_API_BASE_URL}/chat/completions")
        this_proxy_openai_api.headers.update({"Authorization": received_api_key}).update({"Content-Type": "application/json"})
        this_proxy_openai_api.json.update({"model":  received_model})
        this_proxy_openai_api.json.update({"stream": received_stream})
        # ..........................................
        history_messages:list = []
        # ..........................................
        # Seed the conversation with the shared middleware preamble.
        for message in LLM_MESSAGE_LIST_DOCUMENT_COMPARISON:
            history_messages.append(message)
        # ..........................................
        # Fetch the current upload listing so the model can answer "what files do I have".
        response = requests.get(f"{WWW_HTML_BASE_URL}/filemanager.php?query=files")
        rps_text = response.text
        history_messages.append({"role": "user",      "content": "MIDDLEWARE\n这是我现在拥有的文件:\n%s\n如果用户需要, 你最好能够以 | column_1 | column_2 | ... | column_n | (Markdown 表格) 的形式返回给用户" % rps_text})
        history_messages.append({"role": "assistant", "content": "好的, 如果用户想处理的文件不在你的这个 JSON 中, 我将提醒用户通过 [文件上传入口](%s/filemanager.php) 上传" % WWW_HTML_BASE_URL})
        # ..........................................
        # Teach the model the comparison trigger syntax and verdict threshold.
        history_messages.append({"role": "user",      "content": "MIDDLEWARE\n我可以为用户提供文档比对查重服务, 需要用户选定 2 个文件并放在 [] 中, 例如 [文件_A.docx, 文件_B.pdf], 并附上 \"比对/对比/查重\" 关键字"})
        history_messages.append({"role": "assistant", "content": "好的, 如果用户想要对已上传的文件进行比对查重, 我会让用户将选定的文件放在 [] 中并附上 \"比对/对比/查重\" 关键字发送给我, 并给出示例"})
        history_messages.append({"role": "user",      "content": "MIDDLEWARE\n文档比对规则:\n总体相似度 > 0.20, 则存在抄袭嫌疑"})
        history_messages.append({"role": "assistant", "content": "好的, 如果检测到文档的总体相似度 > 0.20, 则判定为存在抄袭嫌疑"})
        # Trigger: last user message contains "[...]" plus a compare keyword.
        # NOTE(review): received_messages[-1] raises IndexError when "messages"
        # is empty; the broad except below turns that into a 500 response.
        if (   ('[' in received_messages[-1]["content"] and    ']' in received_messages[-1]["content"]) and \
            ("对比" in received_messages[-1]["content"]  or "比对" in received_messages[-1]["content"]  or "查重" in received_messages[-1]["content"])):
            confirmed_files_list = Text_Opr_Content_in_Mark_Extract(received_messages[-1]["content"], '[', ']', With_Mark=0)
            confirmed_files_list = confirmed_files_list.split(',')
            confirmed_files_list = [file.lstrip() for file in confirmed_files_list]  # strip spaces left of each file name
            confirmed_files_list = [file.rstrip() for file in confirmed_files_list]  # strip spaces right of each file name
            Doc_Cmp = Document_Comparison()
            Cmp_Res = Doc_Cmp.Workflow(f"{WWW_HTML_UPLOADS_DIR}/{confirmed_files_list[0]}", f"{WWW_HTML_UPLOADS_DIR}/{confirmed_files_list[1]}")
            history_messages.append({"role": "user",      "content": "MIDDLEWARE\n看起来用户已经选定了 %s 这些文件进行比对查重" % str(confirmed_files_list)})
            history_messages.append({"role": "assistant", "content": "好的, 那么检测结果是什么"})
            history_messages.append({"role": "user",      "content": "MIDDLEWARE\n这是两个文档的检测结果:\n%s\n总体相似度:\n%.4f" % (Cmp_Res[0], Cmp_Res[1])})
            history_messages.append({"role": "assistant", "content": "好的, 我会将总体相似度告知用户, 如果你的检测结果包含 JSON 我也会将 JSON 以 | column_1 | column_2 | ... | column_n | (Markdown 表格) 形式响应给用户"})
        # ..........................................
        # The real user conversation goes last, after all middleware context.
        for message in received_messages:
            history_messages.append(message)
        # ..........................................
        this_proxy_openai_api.json.update({"messages": history_messages})
        # ..........................................
        if (received_stream == True):
            rps_iter = this_proxy_openai_api.post()
            # Streaming response handling.
            def generate():
                for chunk in rps_iter:
                    if chunk:
                        yield chunk
            # ......................................
            # OpenAI-style streaming uses Server-Sent Events (SSE), i.e. the
            # text/event-stream content type: each data block starts with
            # "data: " followed by JSON and ends with two line breaks.
            return flask.Response(generate(), content_type="text/event-stream")
        else:
            rps_dict = this_proxy_openai_api.post()
            # ......................................
            return flask.jsonify(rps_dict), 200
    except Exception as e:
        print("[Debug] error:", str(e))
        return flask.jsonify({"error": str(e)}), 500

if __name__ == '__main__':

#   app.run(host="0.0.0.0", port=5102, debug=True)  # Debug mode
    app.run(host="0.0.0.0", port=5102)

# EOF Signed by GF.
