"""
title: GF OPEN WEBUI 0.6 Func AI Agent Document Comparison
author: Gou Feng
author_url: https://github.com/goufeng928
funding_url: https://github.com/goufeng928
version: 0.1
"""

from pydantic import BaseModel, Field
from typing import Optional
# ..................................................
# Custom Import Package
import os
import pandas
import requests

# ##################################################

class Filter:
    """Open WebUI 0.6 filter for AI-agent document comparison.

    Intercepts chat requests in ``inlet``: when the user has uploaded two or
    more files and the message asks for a document comparison (contains
    "比对"/"对比" together with "文档"/"文件"), the first two uploads are sent
    to an internal comparison service, the CSV result is summarized, and a
    few-shot prompt is injected into the conversation before the LLM sees it.
    Also enforces a per-user conversation-turn limit.
    """

    class Valves(BaseModel):
        # Admin-level configuration for this filter.
        priority: int = Field(
            default=0, description="Priority level for the filter operations."
        )
        max_turns: int = Field(
            default=12, description="Maximum allowable conversation turns for a user."
        )         # 8 => Default Value

    class UserValves(BaseModel):
        # Per-user configuration; the effective limit is min(user, admin).
        max_turns: int = Field(
            default=12, description="Maximum allowable conversation turns for a user."
        )         # 4 => Default Value

    def __init__(self):
        # When True, inlet/outlet dump their payloads to stdout.
        self.debug: bool = False

        # Upstream OpenAI-compatible chat endpoint (key redacted in source).
        self.OpenAI_20250505_API_V1_Key: str = (
            ".........................................."
            # Memo: ".........................................."
        )
        self.OpenAI_20250505_API_V1_URL: str = "https://spark-api-open.xf-yun.com/v1/chat/completions"
        self.OpenAI_20250505_API_V1_Model: str = "4.0Ultra"

        # Internal document-comparison service, reachable from the container.
        self.Internals_API_5000_URL: str = (
            "http://host.docker.internal:5000"
            # Memo: "http://192.168.1.1:5000"
            # Memo: "http://host.docker.internal:5000"
        )

        # Snapshots of the most recent inlet request payload.
        self.inlet_messages: list = []
        self.inlet_metadata: dict = {}
        self.inlet_metadata_files: list = []  # body: {"metadata": {..., "files": None / [...], ...}

        # Indicates custom file handling logic. This flag helps disengage default routines in favor of custom
        # implementations, informing the WebUI to defer file-related operations to designated methods within this class.
        # Alternatively, you can remove the files directly from the body in from the inlet hook
        # self.file_handler = True

        # Initialize 'valves' with specific configurations. Using 'Valves' instance helps encapsulate settings,
        # which ensures settings are managed cohesively and not confused with operational flags like 'file_handler'.
        self.valves = self.Valves()

    def Requests_POST_Document_Comparison(self, File_Path_1: str, File_Path_2: str) -> None:
        """POST the two uploaded files to the internal comparison service and
        persist the CSV response to ``/app/backend/data/uploads/results.csv``.

        :param File_Path_1: absolute path of the first uploaded file.
        :param File_Path_2: absolute path of the second uploaded file.
        :raises requests.HTTPError: if the service responds with an error status.
        """
        URL = f"{self.Internals_API_5000_URL}/api/document-comparison"
        # ..........................................
        Rqs_Headers = {"Accept": "text/csv"}
        # ..........................................
        # Open WebUI stores uploads as "<36-char uuid>_<original name>", so
        # basename[37:] strips the UUID and the underscore to recover the
        # original file name. Context managers guarantee the handles are
        # closed (the original leaked both file objects).
        with open(File_Path_1, "rb") as fp_1, open(File_Path_2, "rb") as fp_2:
            Rqs_Files = {
                "file1": (os.path.basename(File_Path_1)[37:], fp_1),
                "file2": (os.path.basename(File_Path_2)[37:], fp_2),
            }
            response = requests.post(URL, headers=Rqs_Headers, files=Rqs_Files)
        # Fail loudly instead of silently saving an HTTP error body as CSV.
        response.raise_for_status()
        # ..........................................
        with open("/app/backend/data/uploads/results.csv", "wb") as f:
            f.write(response.content)

    def inlet(self, body: dict, __user__: Optional[dict] = None) -> dict:
        """Pre-process the request body before it reaches the chat-completion API.

        Detects a "compare two documents" intent, runs the comparison, and
        injects few-shot instructions plus the TOP-10 result rows into the
        message list. Finally enforces the max-turns limit.

        :param body: the chat request payload ({"messages": [...], "metadata": {...}}).
        :param __user__: the requesting user's info dict, or None.
        :returns: the (possibly modified) body.
        :raises Exception: when the conversation exceeds the allowed turns.
        """
        if self.debug:  # fixed: original read `slef.debug` (NameError)
            print(f"inlet:{__name__}")
            print(f"inlet:body:{body}")
            print(f"inlet:user:{__user__}")

        self.inlet_messages       = body.get("messages", [])
        self.inlet_metadata       = body.get("metadata", {})
        # "files" may be absent, None, or a list; .get avoids a KeyError.
        self.inlet_metadata_files = self.inlet_metadata.get("files")
        # ..........................................
        files_count: int = 0
        # ..........................................
        if self.inlet_metadata_files is not None:
            files_count = len(self.inlet_metadata_files)
        # ..........................................
        print("[Func 调试] files count:", files_count)  # -> Debug by GF
        # ..........................................
        # Guard against an empty message list (original indexed blindly).
        user_message = self.inlet_messages[-1]["content"] if self.inlet_messages else ""
        # Trigger only when >= 2 files were attached AND the message asks for
        # a comparison ("比对"/"对比") of documents/files ("文档"/"文件").
        if (files_count >= 2 and ("比对" in user_message or "对比" in user_message) and \
                                 ("文档" in user_message or "文件" in user_message)):

            # Inside the Docker container, Open WebUI stores uploads under:
            # - "/app/backend/data/uploads/f31c9a69-34d6-4b32-a42a-90ad00dad8e7_EXAMPLE.docx"
            open_webui_uploads  = "/app/backend/data/uploads"
            # ......................................
            # In Open WebUI 0.6, a file uploaded in the chat window is saved
            # as "<file id>_<file name>". For example, uploading:
            # - "EXAMPLE.docx"
            # with file id:
            # - "f31c9a69-34d6-4b32-a42a-90ad00dad8e7"
            # is stored as:
            # - "/app/backend/data/uploads/f31c9a69-34d6-4b32-a42a-90ad00dad8e7_EXAMPLE.docx"
            # ......................................
            file_1_id = self.inlet_metadata_files[0]["file"]["id"]
            file_2_id = self.inlet_metadata_files[1]["file"]["id"]
            # ......................................
            file_1_name = self.inlet_metadata_files[0]["file"]["filename"]
            file_2_name = self.inlet_metadata_files[1]["file"]["filename"]
            # ......................................
            file_1_path = f"{open_webui_uploads}{os.sep}{file_1_id}_{file_1_name}"
            file_2_path = f"{open_webui_uploads}{os.sep}{file_2_id}_{file_2_name}"
            # ......................................
            print("[Func 调试] file takeover -> file 1 name: %s, file 1 path: %s" % (file_1_name, file_1_path))  # -> Debug by GF
            print("[Func 调试] file takeover -> file 2 name: %s, file 2 path: %s" % (file_2_name, file_2_path))  # -> Debug by GF

            self.Requests_POST_Document_Comparison(file_1_path, file_2_path)
            # ......................................
            df = pandas.read_csv("/app/backend/data/uploads/results.csv")
            # ......................................
            Overall_Avg_Sim = df["average_sim"].mean()
            # ......................................
            # Cap the sample size: df.sample(10) raises ValueError when the
            # result has fewer than 10 rows.
            df = df.sample(min(10, len(df)))
            #df = df.sort_values("average_sim", ascending=False).reset_index(drop=True)
            #df = df.head(10)
            # ......................................
            Data_Top_10_JSON = df.to_json(index=False, orient="records", force_ascii=False)

            # Replace the user's message with a few-shot dialogue that teaches
            # the model the column semantics and the required Markdown-table
            # output format (all prompt strings intentionally in Chinese).
            body["messages"][-1] = {"role": "user", "content": "你正在帮助用户总结文档相似度检查结果数据, 并将文档相似度检查结果数据以 **| column_1 | column_2 | ... | column_n |** 形式返回给用户"}
            # ......................................
            body["messages"].append({"role": "assistant", "content": "好的, 我将以 **| column_1 | column_2 | ... | column_n |** 这种 Markdown 表格的形式响应用户"})
            body["messages"].append({"role": "user",      "content": "文档相似度检查结果数据中 paragraph_1 是 paragraph 最相似的段落, sentence_1 是 sentence 最相似的句子"})
            body["messages"].append({"role": "assistant", "content": "好的, paragraph_1 也可以叫 **最相似段落**, sentence_1 也可以叫 **最相似句子**"})
            body["messages"].append({"role": "user",      "content": "文档相似度检查结果数据中 paragraph_sim 是段落相似度, sentence_sim 是句子相似度, average_sim 是段落 + 句子的平均相似度"})
            body["messages"].append({"role": "assistant", "content": "好的, paragraph_sim 也可以叫 **段落相似度**, sentence_sim 也可以叫 **句子相似度**, average_sim 也可以叫 **平均相似度**"})
            body["messages"].append({"role": "user",      "content": "正在使用的文档相似度检查方法:\n遍历 B 文本中的每个字符, 只要能在 A 文本中找到 (按顺序), 就累加匹配数 m, 若找不到, 则跳过该字符继续处理后续字符, 最终得分基于匹配字符数 m 占 n 的比例, 直接反映匹配的字符比例, n 取值为 Text 和 Keys 中字符数最多的那个"})
            body["messages"].append({"role": "assistant", "content": "明白, 正在使用的文档相似度检查方法是: 字符数量匹配度评分法"})
            # ......................................
            # One worked example (user data -> assistant Markdown table).
            User_Content = """文档相似度检查结果数据如下:\n""" \
                         + """[\n""" \
                         + """  {\n""" \
                         + """         "document": "文档 A.docx",\n""" \
                         + """        "paragraph": "2025中国石油EPC项目设计说明书框架",\n""" \
                         + """         "sentence": "EPC项目需兼顾绿色低碳与高效产能",\n""" \
                         + """       "document_1": "文档 B.docx",\n""" \
                         + """      "paragraph_1": "2025中国石油EPC项目联合体与分包管理",\n""" \
                         + """    "paragraph_sim": 0.619,\n""" \
                         + """       "sentence_1": "设立联合决策委员会, 重大事项需双方法人代表签字生效",\n""" \
                         + """     "sentence_sim": 0.12,\n""" \
                         + """      "average_sim": 0.3695\n""" \
                         + """  },\n""" \
                         + """  {\n""" \
                         + """         "document": "文档 A.docx",\n""" \
                         + """        "paragraph": "项目概述与设计目标",\n""" \
                         + """         "sentence": "实现全生命周期成本最优",\n""" \
                         + """       "document_1": "文档 B.docx",\n""" \
                         + """      "paragraph_1": "乌干达油田项目案例",\n""" \
                         + """    "paragraph_sim": 0.2222,\n""" \
                         + """       "sentence_1": "联合体权责明晰化",\n""" \
                         + """     "sentence_sim": 0.0,\n""" \
                         + """      "average_sim": 0.111\n""" \
                         + """  }\n""" \
                         + """]\n""" \
                         + """文档总体相似度 = (0.3695 + 0.111) / 2 = 0.2403"""
            body["messages"].append({"role": "user",      "content": User_Content})
            Assistant_Content = """文档相似度检查结果数据如下:\n""" \
                              + """| document    | paragraph                         | sentence                        | document_1   | 最相似段落                          | 段落相似度      | 最相似句子                                         | 句子相似度     | 平均相似度    |\n""" \
                              + """|:------------|:----------------------------------|:--------------------------------|:-------------|:------------------------------------|----------------:|:---------------------------------------------------|---------------:|--------------:|\n""" \
                              + """| 文档 A.docx | 2025中国石油EPC项目设计说明书框架 | EPC项目需兼顾绿色低碳与高效产能 | 文档 B.docx  | 2025中国石油EPC项目联合体与分包管理 |          0.619  | 设立联合决策委员会, 重大事项需双方法人代表签字生效 |           0.12 |        0.3695 |\n""" \
                              + """| 文档 A.docx | 项目概述与设计目标                | 实现全生命周期成本最优          | 文档 B.docx  | 乌干达油田项目案例                  |          0.2222 | 联合体权责明晰化                                   |           0    |        0.111  |\n""" \
                              + """两篇文档 (A.docx 与 B.docx) 的总体相似度为 24.03%, 属于较低相似水平, 表明文档内容差异性较大。"""
            body["messages"].append({"role": "assistant", "content": Assistant_Content})
            # ......................................
            # Final user turn: the real TOP-10 data plus the original request.
            User_Content = "文档相似度检查结果 TOP 10 **最相似部分** 数据如下 (检查结果需要以 Markdown 表格形式响应给用户):\n%s\n文档总体相似度: %.4f\n其它请求:\n%s" % (Data_Top_10_JSON, Overall_Avg_Sim, user_message)
            body["messages"].append({"role": "user", "content": User_Content})

        # Enforce the turn limit; __user__ may legitimately be None (it is the
        # declared default), so guard before calling .get on it.
        if __user__ is not None and __user__.get("role", "admin") in ["user", "admin"]:
            messages = body.get("messages", [])

            max_turns = min(__user__["valves"].max_turns, self.valves.max_turns)
            if len(messages) > max_turns:
                raise Exception(
                    f"Conversation turn limit exceeded. Max turns: {max_turns}"
                )

        return body

    def outlet(self, body: dict, __user__: Optional[dict] = None) -> dict:
        """Post-process the response body after the chat-completion API.

        Currently only emits debug output; returns the body unchanged.

        :param body: the chat response payload.
        :param __user__: the requesting user's info dict, or None.
        :returns: the unmodified body.
        """
        if self.debug:  # fixed: original read `slef.debug` (NameError)
            print(f"outlet:{__name__}")
            print(f"outlet:body:{body}")
            print(f"outlet:user:{__user__}")

        return body

# Signed by GF.
