# GF_PY3_WORKFLOW_Document_Plagiarism_Check.py
# Created by GF 2025-04-21

# Python 3 Standard Libraries.
import os
# ..................................................
import docx
import pandas
import requests

# ##################################################

class Document_Plagiarism_Check(object):

    def Fuzzy_Match_String_Preprocess(self, ArgX:str) -> str:
        """Normalize a string for fuzzy matching.

        Normalization (single translate pass plus lower()):
        - full-width ASCII letters/digits -> half-width (e.g. "Ａ" -> "A",
          "１" -> "1") via str.maketrans / str.translate;
        - delete spaces: both the ASCII space and the full-width
          ideographic space U+3000 (the previous code removed only ' ',
          so full-width spaces leaked through the normalization);
        - fold everything to lower case.

        Example:
            "Ｈｅｌｌｏ！１２３全角ＴＥＳＴ" -> "hello！123全角test"
        (the previous docstring example wrongly showed upper case kept)

        Args:
            ArgX: any value; coerced with str() first.

        Returns:
            str: the normalized text.
        """
        # str.maketrans(x, y, z): each char of x maps to the same-index
        # char of y, and every char of z is deleted — one C-level pass
        # instead of translate + chained .replace() calls.
        StrX = str(ArgX).translate(
                   str.maketrans(
                       "ＡＢＣＤＥＦＧＨＩＪＫＬＭＮＯＰＱＲＳＴＵＶＷＸＹＺａｂｃｄｅｆｇｈｉｊｋｌｍｎｏｐｑｒｓｔｕｖｗｘｙｚ０１２３４５６７８９",
                       "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
                       " \u3000"
                   )
               )
        return StrX.lower()

    def Fuzzy_Match_by_Char_Num_Score(self, Text:str, Keys:str) -> float:
        """Fuzzy match: character-count scoring.

        Args:
            Text: the source text to match against.
            Keys: the search keyword string.

        Returns:
            float: match score in [0.0, 1.0], rounded to 4 decimals;
            closer to 1.0 means a better match.

        Method: walk the characters of Keys left to right. Each character
        is searched for in Text starting just after the previous hit, so
        matches stay in order; characters that cannot be found are simply
        skipped. The score is m / n where m is the number of matched key
        characters and n = max(len(Text), len(Keys)), both measured after
        Fuzzy_Match_String_Preprocess normalization.
        """
        Processed_Text = self.Fuzzy_Match_String_Preprocess(Text)
        Processed_Keys = self.Fuzzy_Match_String_Preprocess(Keys)

        # An empty key string matches everything.
        if not Processed_Keys:
            return 1.0

        Matched = 0       # number of key characters found, in order
        Search_From = 0   # next search start position inside Processed_Text
        for Char in Processed_Keys:
            Position = Processed_Text.find(Char, Search_From)
            if Position != -1:
                Matched += 1
                Search_From = Position + 1  # keep subsequent hits in order

        # Denominator: the longer of the two processed strings. It is
        # always >= 1 here because empty keys already returned above
        # (the old `if k == 0` guard was unreachable and was removed).
        Denominator = max(len(Processed_Text), len(Processed_Keys))

        return round(Matched / Denominator, 4)

    # ##############################################

    def Requests_POST_Stirling_PDF_API_PDF_to_Markdown(self, PDF_Path:str, Stirling_PDF_URL:str="10.47.167.233:8500") -> str:
        """POST a PDF to a Stirling-PDF server's pdf->markdown endpoint.

        Args:
            PDF_Path: path of the local PDF file to upload.
            Stirling_PDF_URL: "host:port" of the Stirling-PDF server. The
                default keeps the previously hard-coded address, so
                existing callers are unaffected.

        Returns:
            str: the raw response body text (the converted document).
        """
        Rqs_Headers = {"accept": "*/*"}
        # ..........................................
        Stirling_PDF_API = f"http://{Stirling_PDF_URL}/api/v1/convert/pdf/markdown"
        # ..........................................
        # Open the PDF inside `with` so the handle is closed even when the
        # request raises (the old code leaked the open file object).
        with open(PDF_Path, mode="rb") as PDF_File:
            Rqs_Files = {
                "fileInput": ("example.pdf", PDF_File, "application/pdf"),
                "outputFormat": (None, 'txt')
            }
            response = requests.post(Stirling_PDF_API, headers=Rqs_Headers, files=Rqs_Files)
        # ..........................................
        return response.text

    # ##############################################

    def String_Clear_Head_Blank_Spaces(self, String_Content:str) -> str:
        """Remove leading ASCII spaces from a string.

        Args:
            String_Content: any value; coerced with str() first.

        Returns:
            str: the input without leading ' ' characters. Only spaces are
            stripped (not tabs/newlines), matching the old behavior.
        """
        # str.lstrip(' ') safely handles '' and all-space input, which the
        # old character-by-character loop crashed on with IndexError.
        return str(String_Content).lstrip(' ')

    def String_Split_into_List_by_Full_Stop(self, String_Content:str):
        """Split a string into sentence fragments on full stops.

        Chinese full stops ("。") are first normalized to the ASCII
        period, then the text is split on '.'.

        Args:
            String_Content: any value; coerced with str() first.

        Returns:
            list[str]: the fragments (str.split semantics — an empty
            input yields [''']).
        """
        normalized = str(String_Content).replace("。", '.')
        return normalized.split('.')

    def Text_Split_into_Pandas_Dataframe_by_Line_Break(self, Text_File_Path:str, Text_Content:str):
        """Split text into a DataFrame with one row per line.

        Args:
            Text_File_Path: source path; only its basename is stored in
                the "document" column.
            Text_Content: the text to split; coerced with str() first.

        Returns:
            pandas.DataFrame with columns ["document", "paragraph"].
        """
        # Normalize Windows line endings, then split on '\n'.
        normalized = str(Text_Content).replace("\r\n", '\n')
        paragraphs = normalized.split('\n')
        # Assemble the result frame: one paragraph per row, tagged with
        # the originating file name.
        frame = pandas.DataFrame({"paragraph": paragraphs})
        frame["document"] = os.path.basename(Text_File_Path)
        return frame[["document", "paragraph"]]

    def Docx_Split_into_Pandas_Dataframe_by_Line_Break(self, Docx_File_Path:str):
        """Load a .docx file into a DataFrame with one row per paragraph.

        Requirement: python-docx 1.1.2

        Args:
            Docx_File_Path: path of the .docx file; only its basename is
                stored in the "document" column.

        Returns:
            pandas.DataFrame with columns ["document", "paragraph"].
            Empty paragraphs are kept as '' rows, e.g.:
            ['', 'Framework of Design Specification ...', '',
             '1. Project Overview and Design Objectives', ...]
        """
        document = docx.Document(Docx_File_Path)  # parse via python-docx
        # Collect the plain text of every paragraph, in document order.
        paragraphs = [paragraph.text for paragraph in document.paragraphs]
        # Assemble the result frame, tagged with the originating file name.
        frame = pandas.DataFrame({"paragraph": paragraphs})
        frame["document"] = os.path.basename(Docx_File_Path)
        return frame[["document", "paragraph"]]

    def Document_Split_into_Pandas_Dataframe_by_Line_Break(self, Document_File_Path:str):
        """Parse a document into a paragraph DataFrame, by file extension.

        Supported: .txt (read directly), .docx (python-docx), .pdf
        (converted to text via the Stirling-PDF API).

        Args:
            Document_File_Path: path of the input document.

        Returns:
            pandas.DataFrame with columns ["document", "paragraph"].

        Raises:
            ValueError: for an unsupported file extension (the old code
                failed with an obscure UnboundLocalError instead).
        """
        # Exact, case-insensitive suffix match. The old `".txt" in
        # path[-5:]` test also matched names such as "a.txtx".
        Path_Lower = Document_File_Path.lower()
        # ..........................................
        if Path_Lower.endswith(".txt"):
            # `with` guarantees the handle is closed even if read() fails.
            with open(Document_File_Path, mode='r', encoding="utf-8") as File_Obj:
                Text_Str = File_Obj.read()
            return self.Text_Split_into_Pandas_Dataframe_by_Line_Break(Document_File_Path, Text_Str)
        # ..........................................
        if Path_Lower.endswith(".docx"):
            return self.Docx_Split_into_Pandas_Dataframe_by_Line_Break(Document_File_Path)
        # ..........................................
        if Path_Lower.endswith(".pdf"):
            Mkd_Text = self.Requests_POST_Stirling_PDF_API_PDF_to_Markdown(Document_File_Path)
            return self.Text_Split_into_Pandas_Dataframe_by_Line_Break(Document_File_Path, Mkd_Text)
        # ..........................................
        raise ValueError(f"Unsupported document type: {Document_File_Path}")

    # ##############################################

    def Paragraph_of_Pandas_Plagiarism_Check(self, CSV_Database, Pandas_DataFrame):
        """Find, for each paragraph, its best match in the CSV database.

        Args:
            CSV_Database: path of a CSV with "document" and "paragraph"
                columns — the plagiarism reference database.
            Pandas_DataFrame: frame with "document" and "paragraph"
                columns to be checked.

        Returns:
            pandas.DataFrame: copy of the input (empty paragraphs dropped)
            with three added columns: "db_doc_of_paragraph",
            "db_paragraph", "db_paragraph_sim" (best fuzzy-match score).
        """
        PandasDF_DB = pandas.read_csv(CSV_Database, dtype={"document": str, "paragraph": str})
        PandasDF_DB = PandasDF_DB[PandasDF_DB["paragraph"] != '']
        # ..........................................
        PandasDF = Pandas_DataFrame.copy()
        # ..........................................
        PandasDF["document" ] = PandasDF["document" ].astype("string")
        PandasDF["paragraph"] = PandasDF["paragraph"].astype("string")
        # ..........................................
        PandasDF = PandasDF[PandasDF["paragraph"] != '']
        # ..........................................
        def best_db_match(value):
            # Score every DB paragraph against `value`. The scores live in
            # a local Series; the old helper instead rewrote a throwaway
            # column on the shared DB frame for every single input row.
            sims = PandasDF_DB["paragraph"].apply(lambda x: self.Fuzzy_Match_by_Char_Num_Score(x, value))
            index_of_max = sims.idxmax()  # first row on ties, as before
            return (PandasDF_DB.loc[index_of_max, "document"],
                    PandasDF_DB.loc[index_of_max, "paragraph"],
                    sims.loc[index_of_max])
        # ..........................................
        PandasDF["temp"] = PandasDF["paragraph"].apply(best_db_match)
        PandasDF["db_doc_of_paragraph"] = PandasDF["temp"].apply(lambda t: t[0])
        PandasDF["db_paragraph"       ] = PandasDF["temp"].apply(lambda t: t[1])
        PandasDF["db_paragraph_sim"   ] = PandasDF["temp"].apply(lambda t: t[2])
        PandasDF = PandasDF.drop("temp", axis=1)
        # ..........................................
        return PandasDF

    def Sentence_of_Pandas_Plagiarism_Check(self, CSV_Database, Pandas_DataFrame):
        """Find, for each sentence, its best match in the CSV database.

        Args:
            CSV_Database: path of a CSV with "document" and "sentence"
                columns — the plagiarism reference database.
            Pandas_DataFrame: frame with "document" and "sentence"
                columns to be checked.

        Returns:
            pandas.DataFrame: copy of the input (empty sentences dropped)
            with three added columns: "db_doc_of_sentence",
            "db_sentence", "db_sentence_sim" (best fuzzy-match score).
        """
        PandasDF_DB = pandas.read_csv(CSV_Database, dtype={"document": str, "sentence": str})
        PandasDF_DB = PandasDF_DB[PandasDF_DB["sentence"] != '']
        # ..........................................
        PandasDF = Pandas_DataFrame.copy()
        # ..........................................
        PandasDF["document"] = PandasDF["document"].astype("string")
        PandasDF["sentence"] = PandasDF["sentence"].astype("string")
        # ..........................................
        PandasDF = PandasDF[PandasDF["sentence"] != '']
        # ..........................................
        def best_db_match(value):
            # Score every DB sentence against `value`. The scores live in
            # a local Series; the old helper instead rewrote a throwaway
            # column on the shared DB frame for every single input row.
            sims = PandasDF_DB["sentence"].apply(lambda x: self.Fuzzy_Match_by_Char_Num_Score(x, value))
            index_of_max = sims.idxmax()  # first row on ties, as before
            return (PandasDF_DB.loc[index_of_max, "document"],
                    PandasDF_DB.loc[index_of_max, "sentence"],
                    sims.loc[index_of_max])
        # ..........................................
        PandasDF["temp"] = PandasDF["sentence"].apply(best_db_match)
        PandasDF["db_doc_of_sentence"] = PandasDF["temp"].apply(lambda t: t[0])
        PandasDF["db_sentence"       ] = PandasDF["temp"].apply(lambda t: t[1])
        PandasDF["db_sentence_sim"   ] = PandasDF["temp"].apply(lambda t: t[2])
        PandasDF = PandasDF.drop("temp", axis=1)
        # ..........................................
        return PandasDF

    # ##############################################

    def Workflow(self, Document_File_Path:str, Results_Save_Path:str, CSV_Database:str="./DPC.CSV.DB.csv") -> int:
        """Run the full document plagiarism-check pipeline.

        Args:
            Document_File_Path: input document path (.txt / .docx / .pdf).
            Results_Save_Path: output path for the Excel results file.
            CSV_Database: reference database CSV; expected to provide
                "document", "paragraph" and "sentence" columns. The
                default keeps the previously hard-coded path, so existing
                callers behave exactly as before.

        Returns:
            int: always 1. (The annotation was fixed from the literal
            `-> 1` to the proper type `int`.)

        Stages:
        [1] Split by Line Break          [2] Filtering Datas
        [3] Washing Datas                [4] Split Paragraphs into Sentences
        [5] Paragraph Plagiarism Check   [6] Sentence Plagiarism Check
        [7] Calculate Comprehensive Similarity   [8] Save The Results

        Each stage runs in its own try/except: a failure is printed with a
        "(0)" marker and later stages are still attempted — this
        best-effort behavior is intentional and preserved.
        """
        print("[Workflow] Starting Workflow -> Document Plagiarism Check ...")
        # ..........................................
        Document_DF = pandas.DataFrame()
        # ..........................................
        # [1] Parse the document into one DataFrame row per paragraph.
        try:
            Document_DF = self.Document_Split_into_Pandas_Dataframe_by_Line_Break(Document_File_Path)
            print("[Workflow] Working in Document Plagiarism Check -> Split by Line Break (1)")
        except Exception as e:
            print("[Workflow] Working in Document Plagiarism Check -> Split by Line Break (0)")
            print(e)
        # ..........................................
        # [2] Drop empty paragraphs.
        try:
            Document_DF = Document_DF[Document_DF["paragraph"] != '']
            print("[Workflow] Working in Document Plagiarism Check -> Filtering Datas (1)")
        except Exception as e:
            print("[Workflow] Working in Document Plagiarism Check -> Filtering Datas (0)")
            print(e)
        # ..........................................
        # [3] Strip leading spaces from each paragraph.
        try:
            Document_DF["paragraph"] = Document_DF["paragraph"].apply(lambda x: self.String_Clear_Head_Blank_Spaces(x))
            print("[Workflow] Working in Document Plagiarism Check -> Washing Datas (1)")
        except Exception as e:
            print("[Workflow] Working in Document Plagiarism Check -> Washing Datas (0)")
            print(e)
        # ..........................................
        # [4] Split paragraphs on full stops; explode to one row per sentence.
        try:
            Document_DF["sentence" ] = Document_DF["paragraph"].apply(lambda x: self.String_Split_into_List_by_Full_Stop(x))
            Document_DF = Document_DF.explode("sentence")
            print("[Workflow] Working in Document Plagiarism Check -> Split Paragraphs into Sentences (1)")
        except Exception as e:
            print("[Workflow] Working in Document Plagiarism Check -> Split Paragraphs into Sentences (0)")
            print(e)
        # ..........................................
        # [5] Best paragraph-level match against the database.
        try:
            Result_DF = self.Paragraph_of_Pandas_Plagiarism_Check(CSV_Database, Document_DF)
            print("[Workflow] Working in Document Plagiarism Check -> Paragraph Plagiarism Check (1)")
        except Exception as e:
            print("[Workflow] Working in Document Plagiarism Check -> Paragraph Plagiarism Check (0)")
            print(e)
        # ..........................................
        # [6] Best sentence-level match against the database.
        try:
            Result_DF = self.Sentence_of_Pandas_Plagiarism_Check(CSV_Database, Result_DF)
            print("[Workflow] Working in Document Plagiarism Check -> Sentence Plagiarism Check (1)")
        except Exception as e:
            print("[Workflow] Working in Document Plagiarism Check -> Sentence Plagiarism Check (0)")
            print(e)
        # ..........................................
        # [7] Comprehensive similarity = mean of paragraph and sentence scores.
        try:
            Result_DF["average_sim"] = (Result_DF["db_paragraph_sim"] + Result_DF["db_sentence_sim"]) / 2
            print("[Workflow] Working in Document Plagiarism Check -> Calculate Comprehensive Similarity (1)")
        except Exception as e:
            print("[Workflow] Working in Document Plagiarism Check -> Calculate Comprehensive Similarity (0)")
            print(e)
        # ..........................................
        # [8] Write the result table to Excel (requires an Excel engine
        #     such as openpyxl to be installed).
        try:
            Result_DF.to_excel(Results_Save_Path, index=False)
            print("[Workflow] Working in Document Plagiarism Check -> Save The Results (1)")
        except Exception as e:
            print("[Workflow] Working in Document Plagiarism Check -> Save The Results (0)")
            print(e)
        # ..........................................
        print("[Workflow] Workflow Finished -> Document Plagiarism Check")
        # ..........................................
        return 1

# EOF Signed by GF.
