# coding:utf-8
import sys
import os
import shutil
import re
import traceback
import docx
import copy
import logging
from docx import Document
from zipfile import ZipFile
from datetime import datetime
import docx.document
from lxml import etree as ET
from openai import OpenAI


# WordprocessingML main namespace (the "w:" prefix inside .docx parts).
DEFAULT_NS = 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'
NS_MAP = {"w": DEFAULT_NS}

# lxml Clark-notation names: "{namespace}localname".  Built straight from
# DEFAULT_NS — the original spelling f"{NS_MAP["w"]}" nests the same quote
# character inside an f-string, which only parses on Python 3.12+ (PEP 701).
ELEMENT_TAG_R          = '{' + DEFAULT_NS + '}r'
ELEMENT_TAG_T          = '{' + DEFAULT_NS + '}t'
ELEMENT_TAG_SDT        = '{' + DEFAULT_NS + '}sdt'
ELEMENT_TAG_SDTCONTENT = '{' + DEFAULT_NS + '}sdtContent'
ELEMENT_TAG_BKSTART    = '{' + DEFAULT_NS + '}bookmarkStart'
ELEMENT_TAG_BKEND      = '{' + DEFAULT_NS + '}bookmarkEnd'
ELEMENT_ATTR_NAME      = '{' + DEFAULT_NS + '}name'
ELEMENT_ATTR_ID        = '{' + DEFAULT_NS + '}id'
 

# Separator used when splitting multi-item model output.
OUTPUT_SEPARATOR = "-----"
# SECURITY(review): API key is hardcoded and committed to source — it should
# be rotated and loaded from an environment variable or secret store instead.
openai_client = OpenAI(base_url="https://wishub-x1.ctyun.cn/v1", api_key="447a4e13effc4753992b4f2d18fae1fe")


class CommentText:
    """The four labelled fields that make up a review comment's body text."""

    # Chinese section labels, used both to parse model output strings and to
    # render the comment paragraphs ("error type/content/suggestion/reason").
    INSPECT_TYPE         = "错误类型："
    INSPECT_CONTENT      = "错误内容："
    INSPECT_SUGGESTION   = "修改建议："
    INSPECT_REASON       = "错误原因："

    # One <w:p> paragraph (paragraph style "2") per labelled field.
    COMMENT_TEXT_FORMATE_TEMPLATE = '<w:p><w:pPr><w:pStyle w:val="2"/></w:pPr><w:r><w:t>{}</w:t></w:r></w:p>' \
                                    '<w:p><w:pPr><w:pStyle w:val="2"/></w:pPr><w:r><w:t>{}</w:t></w:r></w:p>' \
                                    '<w:p><w:pPr><w:pStyle w:val="2"/></w:pPr><w:r><w:t>{}</w:t></w:r></w:p>' \
                                    '<w:p><w:pPr><w:pStyle w:val="2"/></w:pPr><w:r><w:t>{}</w:t></w:r></w:p>'


    def __init__(self, content, context, suggestion, reason = "", type = "错别字"):
        """Store the review fields.

        content    -- the erroneous text itself
        context    -- surrounding text used to locate *content* in the document
        suggestion -- proposed correction
        reason     -- optional explanation of the error
        type       -- error category label (defaults to "错别字" / typo)
        """
        self.__inspect_type__       = type
        self.__inspect_content__    = content
        self.__inspect_context__    = context
        self.__inspect_suggestion__ = suggestion
        self.__inspect_reason__     = reason


    @classmethod
    def from_string(cls, inspect_result):
        """Parse "错误类型：…错误内容：…修改建议：…错误原因：…" into a CommentText.

        Fixes two defects of the original implementation:
        * sections whose label is absent now default to "" instead of leaving
          the local unbound (NameError);
        * the parsed fields are passed to the constructor in the correct
          positions (the original positional call put the parsed *type* into
          the content slot and shifted every other field).
        """
        type = content = suggestion = reason = ""
        type_offset         = inspect_result.find(cls.INSPECT_TYPE)
        content_offset      = inspect_result.find(cls.INSPECT_CONTENT)
        suggestion_offset   = inspect_result.find(cls.INSPECT_SUGGESTION)
        reason_offset       = inspect_result.find(cls.INSPECT_REASON)

        if type_offset != -1 and content_offset != -1:
            type = inspect_result[type_offset+len(cls.INSPECT_TYPE):content_offset]
        if content_offset != -1 and suggestion_offset != -1:
            content = inspect_result[content_offset+len(cls.INSPECT_CONTENT):suggestion_offset]
        if suggestion_offset != -1 and reason_offset != -1:
            suggestion = inspect_result[suggestion_offset+len(cls.INSPECT_SUGGESTION):reason_offset]
        if reason_offset != -1:
            reason = inspect_result[reason_offset+len(cls.INSPECT_REASON):len(inspect_result)]

        # The flat string carries no separate context; keep the original
        # behavior of using the content as its own context.
        return cls(content, content, suggestion, reason, type)


    def format(self):
        """Render the four labelled fields as <w:p> comment paragraphs."""
        return self.COMMENT_TEXT_FORMATE_TEMPLATE.format(f"{self.INSPECT_TYPE}{self.__inspect_type__}",
                                                         f"{self.INSPECT_CONTENT}{self.__inspect_content__}",
                                                         f"{self.INSPECT_SUGGESTION}{self.__inspect_suggestion__}",
                                                         f"{self.INSPECT_REASON}{self.__inspect_reason__}")



# Archive-relative paths of the OOXML parts this tool reads and rewrites.
COMMENTS_XML_FILENAME               = 'comments.xml'
CONTENT_TYPES_XML_PATH              = '[Content_Types].xml'
WORD_COMMENTS_XML_PATH              = 'word/' + COMMENTS_XML_FILENAME
WORD_DOCUMENT_XML_PATH              = 'word/document.xml'
WORD_RELS_DOCUMENT_XML_RELS_PATH    = 'word/_rels/document.xml.rels'
UTF8_ENCODING                        = 'UTF-8'



class Comment:
    """One Word comment: an id, authorship metadata, and the review text
    rendered into word/comments.xml."""

    # Serialized <w:comment> element.  The trailing </w:comments> is
    # intentional: the comments-file writer slices its own closing tag off
    # before appending this fragment.
    COMMENT_FORMATE_TEMPLATE =  '<w:comment w:id="{}" w:author="{}" w:date="{}" w:initials="{}">' \
                                '{}' \
                                '</w:comment>' \
                                '</w:comments>'
    COMMENT_DATE_FORMAT      =  "%Y-%m-%d %H:%M:%S"
    DEF_COMMENT_AUTHOR       =  "QingPingMo"
    DEF_COMMENT_INITIALS     =  "QPM"


    def __init__(self,
                 id,
                 comment_content, comment_context, comment_suggestion,
                 comment_reason = "", comment_type = "错别字",
                 author=DEF_COMMENT_AUTHOR, initials=DEF_COMMENT_INITIALS):
        """Capture identity and author metadata, and wrap the review fields
        in a CommentText payload."""
        self.__id__       = id
        self.__author__   = author
        self.__initials__ = initials
        # Timestamp is taken at construction time.
        self.__date__     = datetime.now().strftime(self.COMMENT_DATE_FORMAT)
        self.__comment_text__ = CommentText(comment_content,
                                            comment_context,
                                            comment_suggestion,
                                            comment_reason,
                                            comment_type)

    @property
    def id(self):
        """Numeric comment id used in all w:id attributes."""
        return self.__id__

    @property
    def content(self):
        """The erroneous text this comment is anchored to."""
        return self.__comment_text__.__inspect_content__

    @content.setter
    def content(self, new_value):
        self.__comment_text__.__inspect_content__ = new_value

    @property
    def suggestion(self):
        """Proposed correction text."""
        return self.__comment_text__.__inspect_suggestion__

    @property
    def reason(self):
        """Optional explanation of the error."""
        return self.__comment_text__.__inspect_reason__

    @property
    def context(self):
        """Surrounding text used to locate the content in the document."""
        return self.__comment_text__.__inspect_context__

    @context.setter
    def context(self, new_value):
        self.__comment_text__.__inspect_context__ = new_value


    def format(self):
        """Render this comment as a <w:comment> XML fragment (plus the
        closing </w:comments> tag consumed by the writer)."""
        body = self.__comment_text__.format()
        return self.COMMENT_FORMATE_TEMPLATE.format(self.__id__,
                                                    self.__author__,
                                                    self.__date__,
                                                    self.__initials__,
                                                    body)



class WordCommentsXmlFile:
    """word/comments.xml wrapper: holds the XML text in memory and appends
    new <w:comment> elements before the closing </w:comments> tag."""

    # Minimal, namespace-complete comments.xml used when the document has no
    # comments part yet (namespace set matches what Word itself emits).
    DEF_WORD_COMMENTS_XML = '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n<w:comments xmlns:wpc="http://schemas.microsoft.com/office/word/2010/wordprocessingCanvas" xmlns:cx="http://schemas.microsoft.com/office/drawing/2014/chartex" xmlns:cx1="http://schemas.microsoft.com/office/drawing/2015/9/8/chartex" xmlns:cx2="http://schemas.microsoft.com/office/drawing/2015/10/21/chartex" xmlns:cx3="http://schemas.microsoft.com/office/drawing/2016/5/9/chartex" xmlns:cx4="http://schemas.microsoft.com/office/drawing/2016/5/10/chartex" xmlns:cx5="http://schemas.microsoft.com/office/drawing/2016/5/11/chartex" xmlns:cx6="http://schemas.microsoft.com/office/drawing/2016/5/12/chartex" xmlns:cx7="http://schemas.microsoft.com/office/drawing/2016/5/13/chartex" xmlns:cx8="http://schemas.microsoft.com/office/drawing/2016/5/14/chartex" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:aink="http://schemas.microsoft.com/office/drawing/2016/ink" xmlns:am3d="http://schemas.microsoft.com/office/drawing/2017/model3d" xmlns:o="urn:schemas-microsoft-com:office:office" xmlns:oel="http://schemas.microsoft.com/office/2019/extlst" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:m="http://schemas.openxmlformats.org/officeDocument/2006/math" xmlns:v="urn:schemas-microsoft-com:vml" xmlns:wp14="http://schemas.microsoft.com/office/word/2010/wordprocessingDrawing" xmlns:wp="http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing" xmlns:w10="urn:schemas-microsoft-com:office:word" xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main" xmlns:w14="http://schemas.microsoft.com/office/word/2010/wordml" xmlns:w15="http://schemas.microsoft.com/office/word/2012/wordml" xmlns:w16cex="http://schemas.microsoft.com/office/word/2018/wordml/cex" xmlns:w16cid="http://schemas.microsoft.com/office/word/2016/wordml/cid" xmlns:w16="http://schemas.microsoft.com/office/word/2018/wordml" xmlns:w16du="http://schemas.microsoft.com/office/word/2023/wordml/word16du" xmlns:w16sdtdh="http://schemas.microsoft.com/office/word/2020/wordml/sdtdatahash" xmlns:w16sdtfl="http://schemas.microsoft.com/office/word/2024/wordml/sdtformatlock" xmlns:w16se="http://schemas.microsoft.com/office/word/2015/wordml/symex" xmlns:wpg="http://schemas.microsoft.com/office/word/2010/wordprocessingGroup" xmlns:wpi="http://schemas.microsoft.com/office/word/2010/wordprocessingInk" xmlns:wne="http://schemas.microsoft.com/office/word/2006/wordml" xmlns:wps="http://schemas.microsoft.com/office/word/2010/wordprocessingShape" mc:Ignorable="w14 w15 w16se w16cid w16 w16cex w16sdtdh w16sdtfl w16du wp14"></w:comments>'
    WORD_COMMENTS_XML_LAST_CLOSING_TAG_OFFSET = -len('</w:comments>')


    def __init__(self, zip_file = None):
        """Load comments.xml from *zip_file*, or start from the empty default
        when the document has no comments part yet."""
        if zip_file is not None:
            self.__file_content__ = zip_file.read(WORD_COMMENTS_XML_PATH).decode(UTF8_ENCODING)
        else:
            self.__file_content__ = self.DEF_WORD_COMMENTS_XML
        self.__file_content_updated__ = False
        self.__inserted_commentobjs__ = {}   # comment id -> Comment already appended


    def nextCommentId(self):
        """Return the next free comment id: numeric max of existing ids + 1,
        or 0 when no <w:comment> element exists yet."""
        if self.__file_content__.find('<w:comment ') == -1:
            return 0
        # Compare the ids numerically.  The original took max() over the id
        # *strings*, so lexicographic order made e.g. "9" beat "10" and the
        # "next" id could collide with an existing one.
        comment_ids = re.compile(r'(?<=id=")\d+').findall(self.__file_content__)
        return max(int(cid) for cid in comment_ids) + 1


    def insertCommentObj(self, commentObj:"Comment"):
        """Append *commentObj* before </w:comments>.

        Returns False (and does nothing) when a comment with the same id has
        already been inserted, True otherwise.
        """
        if commentObj.id in self.__inserted_commentobjs__:
            return False

        # commentObj.format() ends with its own </w:comments>, so slicing the
        # current closing tag off keeps the document well-formed.
        self.__file_content__ = self.__file_content__[:self.WORD_COMMENTS_XML_LAST_CLOSING_TAG_OFFSET] + commentObj.format()
        if self.__file_content_updated__ == False:
            self.__file_content_updated__ = True

        self.__inserted_commentobjs__[commentObj.id] = commentObj
        return True


    def writeToFile(self):
        """Write the XML text to word/comments.xml if it changed, then clear
        the dirty flag."""
        if self.__file_content_updated__ == True:
            with open(WORD_COMMENTS_XML_PATH,'w', encoding=UTF8_ENCODING) as c_f:
                c_f.write(self.__file_content__)
            self.__file_content_updated__ = False



class WordDocumentXmlFile:
    """In-memory editor for word/document.xml.

    The document's text can be split across many <w:r> runs, so locating a
    comment's target text may require stitching run texts together.  Once
    located, Word comment markup (commentRangeStart / commentRangeEnd /
    commentReference) is spliced around the target characters.
    """

    # Splice skeleton inserted inside an existing <w:r><w:t> pair: it closes
    # the current run, emits the commented text in its own styled run wrapped
    # by the comment range, then reopens a run/text for the remaining tail.
    # Placeholders, in order: id, run style, text, id, id, run style.
    COMMENT_FORMATE_TEMPLATE =  '</w:t></w:r>' \
                                '<w:commentRangeStart w:id="{}"/>' \
                                '<w:r>{}<w:t>{}</w:t></w:r>' \
                                '<w:commentRangeEnd w:id="{}"/>' \
                                '<w:commentReference w:id="{}"/>' \
                                '<w:r>{}<w:t>'


    def __init__(self, zip_file):
        """Read word/document.xml out of *zip_file* and normalize its text."""
        self.__file_content__         = zip_file.read(WORD_DOCUMENT_XML_PATH).decode(UTF8_ENCODING)
        self.__file_content_updated__ = False
        self.__no_matched_count__     = 0
        # Counter consumed by the legacy __locateCommentContentOffset__ to
        # skip runs added by earlier insertions.  The original never
        # initialized it, so that method raised AttributeError when called.
        self.__new_comments_count__   = 0

        # Normalize so plain-text comment content can be matched literally:
        # non-breaking spaces -> spaces, &quot; entities -> literal quotes.
        self.__file_content__ = self.__file_content__.replace("\xa0", " ")
        self.__file_content__ = self.__file_content__.replace("&quot;", '"')

    def insertCommentObjIntoFounderDoc(self, commentObj:"Comment"):
        """Anchor *commentObj* inside the <w:sdt> block whose id matches.

        Finds the first occurrence of the comment content within the matching
        structured-document-tag and replaces it with
        <commentRangeStart/>{content}<commentRangeEnd/><commentReference/>.
        """
        if self.__file_content__.find(commentObj.content) != -1:
            # The template has six placeholders (two of them run styles); the
            # original passed only four arguments, which raised IndexError at
            # format time.  No run style is recoverable on this path, so pass
            # empty style strings.
            replacement = self.COMMENT_FORMATE_TEMPLATE.format(commentObj.id,
                                                               '',
                                                               commentObj.content,
                                                               commentObj.id,
                                                               commentObj.id,
                                                               '')

            match = re.search(rf'"{commentObj.id}"', self.__file_content__)
            if match:
                # Bracket the enclosing <w:sdt> element around the id match.
                sdt_start = self.__file_content__.rfind("<w:sdt>", 0, match.start())
                sdt_end   = self.__file_content__.find("</w:sdt>", match.start())
                if sdt_start != -1:
                    if self.__file_content__.find("<w:commentReference", sdt_start, sdt_end) != -1:
                        # Comment already inserted (duplicated comment ids).
                        return
                    # Scan each <w:t> inside the sdt for the content text.
                    text_start = self.__file_content__.find("<w:t>", sdt_start, sdt_end)
                    while text_start != -1:
                        text_end = self.__file_content__.find("</w:t>", text_start, sdt_end)
                        content_start = self.__file_content__.find(f"{commentObj.content}", text_start, text_end)
                        if content_start != -1:
                            self.__file_content__ = self.__file_content__[0:content_start] + \
                                                    replacement + \
                                                    self.__file_content__[content_start+len(commentObj.content):]
                            break
                        else:
                            text_start = self.__file_content__.find("<w:t>", text_end+len('</w:t>'), sdt_end)
                if self.__file_content_updated__ == False:
                    self.__file_content_updated__ = True


    def __rigthmost_partial_matched__(self, run_text, comment_context):
        """Return the length of the longest suffix of *run_text* that is a
        prefix of *comment_context*; 0 when there is no overlap."""
        max_match = min(len(run_text), len(comment_context))

        while max_match > 0:
            if run_text[-max_match:] == comment_context[0:max_match]:
                return max_match
            else:
                max_match -= 1

        return max_match

    def __getRunText__(self, run_start):
        """Return (text_offset_within_run, text) for the first <w:t> of the
        run starting at *run_start*, or () for a run with no text.

        The offset is relative to run_start; callers add run_start to get an
        absolute position in the file content.
        """
        run_end   = self.__file_content__.find("</w:r>", run_start)
        t_matchs = list(re.finditer('<w:t[^>]*>(.*?)</w:t>', self.__file_content__[run_start:run_end]))
        if t_matchs:
            return (t_matchs[0].start(1), t_matchs[0].group(1))
        return ()


    def __locate_content_offset__(self, context, content):
        '''
        Return the offset of *content* within *context*, skipping candidate
        matches that fall inside an XML escape sequence (&lt; &gt; &amp;
        &apos; &quot;); -1 when the content cannot be located safely.
        '''
        content_start = -1
        grp_offset = context.find(content)
        if grp_offset >= 0:
            part_of_esc_seq = False
            for xmlEscSeqMatch in re.finditer("&(lt|gt|amp|apos|quot);", context):
                if grp_offset < xmlEscSeqMatch.start():
                    # Match lies entirely before this escape sequence: safe.
                    break
                elif grp_offset <= xmlEscSeqMatch.end():
                    # Match starts inside the escape sequence: retry after it.
                    grp_offset = context.find(content, xmlEscSeqMatch.end())
                    # No later occurrence of the same text.
                    if grp_offset < 0:
                        part_of_esc_seq = True
                        break
                    continue
                else: # grp_offset > xmlEscSeqMatch.end():
                    break
            if part_of_esc_seq == False:
                content_start = grp_offset
        return content_start


    def __locateCommentContentOffset_v2__(self, run_id:int, commentObj):
        """Locate commentObj.content in the file, scanning runs from index
        *run_id* and stitching run texts when the context spans several runs.

        Returns (absolute_offset, chars_available_in_that_run); (-1, 0) when
        the context cannot be found.
        """
        content_start = (-1, 0)
        content_offset = self.__locate_content_offset__(commentObj.context, commentObj.content)
        # Content not present inside its own context: fall back to annotating
        # the whole context string.
        if content_offset < 0:
            log_info(f"{repr(commentObj.content)} can not be located in {repr(commentObj.context)}")
            commentObj.content = commentObj.context
            content_offset = 0

        run_list = list(re.finditer('<w:r>', self.__file_content__))
        while True:
            if run_id >= len(run_list):
                log_err(f"run_id[{run_id}] exceeds the len of list[{len(run_list)}]")
                break

            run_start = run_list[run_id].start()
            run_text = self.__getRunText__(run_start)
            if not run_text:
                # Skip empty run.
                run_id += 1
                continue

            log_debug(f"run_id:{run_id}/{repr(run_text)}/{repr(commentObj.context)}/{repr(commentObj.content)}")
            if run_text[1].find(commentObj.context) >= 0:
                # Case #1: a single run's text contains the whole context.
                content_start = (run_start +  run_text[0] + run_text[1].find(commentObj.context) + content_offset,
                                 len(run_text[1]) - run_text[1].find(commentObj.context) - content_offset)
                break

            rightmost_maxmatch = self.__rigthmost_partial_matched__(run_text[1], commentObj.context)
            if rightmost_maxmatch == 0:
                # No rightmost partial match: try the next run.
                run_id += 1
                continue

            concated_run_text = run_text[1][-rightmost_maxmatch:]
            flagments_info = []
            flagments_info.append((run_start+run_text[0]+len(run_text[1])-rightmost_maxmatch, rightmost_maxmatch))
            log_debug(f'rightmost_maxmatch[1]:[{flagments_info[-1]}]')
            while True: # Context spans multiple runs.
                # The loop body indexes run_list[run_id+1], so the bound must
                # be checked against run_id+1 (the original checked run_id and
                # could raise IndexError on the last run).  Advancing run_id
                # before breaking also guarantees the outer loop terminates.
                if run_id + 1 >= len(run_list):
                    log_err(f"run_id[{run_id+1}] exceeds the len of list[{len(run_list)}]")
                    run_id += 1
                    break

                next_run_start = run_list[run_id+1].start()
                next_run_text = self.__getRunText__(next_run_start)
                if not next_run_text:
                    # Skip empty run.
                    log_debug(f"rightmost_maxmatch[2]:empty run[{run_id+1}]")
                    flagments_info.append((next_run_start, 0))
                    run_id += 1
                    continue

                log_debug(f"rightmost_maxmatch[3-1]:[{concated_run_text}][{next_run_text[1]}]")
                concated_run_text  += next_run_text[1]
                log_debug(f"rightmost_maxmatch[3]:[{concated_run_text}]")
                if concated_run_text.find(commentObj.context) >= 0:
                    # Case #2: the concatenated run texts contain the context.
                    total_len = 0
                    for info in flagments_info:
                        # Walk the collected fragments to find which one the
                        # content offset falls into.
                        if (total_len + info[1]) < content_offset:
                            total_len += info[1]
                        else:
                            # Content offset found inside an earlier fragment.
                            content_start = (info[0] + content_offset - total_len, total_len + info[1]- content_offset)
                            log_debug(f"rightmost_maxmatch[3+2]:[{content_start}]")
                            break # for info in flagments_info

                    if content_start[0] < 0:
                        # Content offset falls into the newest (next) run.
                        content_start = (next_run_start + next_run_text[0] + content_offset - total_len,
                                        len(next_run_text[1]) -content_offset + total_len)
                        log_debug(f"rightmost_maxmatch[3+3]:[{content_start}]")

                    log_debug(f'rightmost_maxmatch[4]:[{self.__file_content__[info[0]:info[0]+20]}]')
                    log_debug(f'rightmost_maxmatch[4]:[{self.__file_content__[content_start[0]:content_start[0]+20]}]')
                    break # while True: context spans multiple runs

                elif commentObj.context.startswith(concated_run_text):
                    # Still a prefix of the context: keep accumulating runs.
                    flagments_info.append((next_run_start+next_run_text[0], len(next_run_text[1])))
                    log_debug(f'rightmost_maxmatch[5]:[{flagments_info[-1]}]')
                    run_id += 1
                    continue
                else:
                    log_debug(f"rightmost_maxmatch[6]:")
                    rightmost_maxmatch = 0
                    # Partial match broke down: restart from the next run.
                    run_id += 1
                    break

            # NOTE(review): `> 0` means a match at absolute offset 0 would be
            # re-scanned; harmless in practice since offset 0 is the XML
            # declaration, but `>= 0` would be the strictly correct test.
            if content_start[0] > 0:
                # Case #2 was hit.
                break

        if content_start[0] < 0:
            self.__no_matched_count__ += 1
            offset = self.__file_content__.find(commentObj.context)
            log_err(f"Can not handle [context]/[content]/[suggestion] at {self.__no_matched_count__}: "\
                    f"[{commentObj.context}]/[{commentObj.content}]/[{commentObj.suggestion}]/[{commentObj.id}]")

        return content_start

    def __locateCommentContentOffset__(self, run_id:int, commentObj):
        """Legacy locator, superseded by __locateCommentContentOffset_v2__:
        only finds content when a single <w:t> holds the whole context.

        Relies on self.__new_comments_count__ (each insertion adds two runs)
        to keep *run_id* aligned with the mutated file content.
        """
        content_start = -1
        r_matches = list(re.finditer('<w:r>', self.__file_content__))
        log_info(f"r_matches total count == {len(r_matches)}")
        for run in r_matches[(run_id + self.__new_comments_count__*2):]:
            run_start = run.start()
            run_end   = self.__file_content__.find("</w:r>", run_start)
            for t_match in list(re.finditer('<w:t[^>]*>(.*?)</w:t>', self.__file_content__[run_start:run_end])):
                context_offset = t_match.group(1).find(commentObj.context)
                if context_offset == -1:
                    if self.__rigthmost_partial_matched__(t_match.group(1), commentObj.context) == 0:
                        # Run text has no rightmost partial match with the context.
                        continue
                    # Run text partially matches; multi-run stitching is not
                    # implemented here (see v2) -- skip.
                    continue

                grp_offset = t_match.group(1).find(commentObj.content)

                if grp_offset > 0:
                    # Reject matches that start inside an XML escape sequence.
                    part_of_esc_seq = False
                    for xmlEscSeqMatch in re.finditer("&(lt|gt|amp|apos|quot);", t_match.group(1)):
                        if grp_offset < xmlEscSeqMatch.start():
                            break
                        elif grp_offset <= xmlEscSeqMatch.end():
                            grp_offset = t_match.group(1).find(commentObj.content, xmlEscSeqMatch.end())
                            # No later occurrence of the same text.
                            if grp_offset < 0:
                                part_of_esc_seq = True
                                break
                            continue
                        else: # grp_offset > xmlEscSeqMatch.end():
                            break
                    if part_of_esc_seq == False:
                        content_start = run_start + t_match.start(1) + grp_offset
                        break
            if content_start > 0:
                break

        if content_start < 0:
            self.__no_matched_count__ += 1
            offset = self.__file_content__.find(commentObj.context)
            log_err(f"Can not handle [context]/[content]/[suggestion] at {self.__no_matched_count__}: "\
                    f"[{commentObj.context}]/[{commentObj.content}]/[{commentObj.suggestion}]/[{commentObj.id}]")

        return content_start



    def __retrieve_content_style__(self, content_start):
        '''
        Return the run-properties markup (<w:rPr>...</w:rPr>) of the run that
        contains *content_start*, or ' ' when none can be found.

        <w:r>

            <w:rPr><w:rFonts w:hint="eastAsia" w:ascii="宋体" w:hAnsi="宋体" w:eastAsia="宋体" w:cs="宋体" /></w:rPr>
            ^                                                                                                     ^
            |______________________________________  content_style   ____________________________________________ |
            <w:t xml:space="preserve"> xml text</w:t>
                                        ^
                                        |__content_start
        </w:r>
        '''
        run_offset = self.__file_content__.rfind("<w:r>", 0, content_start)
        if run_offset < 0:
            return ' '
        text_offset = self.__file_content__.rfind('<w:t ', run_offset, content_start)

        if text_offset == -1:
            text_offset = self.__file_content__.rfind('<w:t>', run_offset, content_start)
            if text_offset == -1:
                return ' '

        return self.__file_content__[run_offset+5:text_offset]

    def insertCommentObj(self, run_id:int, commentObj:"Comment"):
        """Splice comment markup for *commentObj* into the document, starting
        the content search at run index *run_id*.

        Returns True when the document was modified, False when the content
        could not be located.
        """
        content_start = self.__locateCommentContentOffset_v2__(run_id, commentObj)
        if content_start[0] >= 0:
            # Look up the run style only for a valid location (the original
            # called this with offset -1 on the failure path as well).
            content_style = self.__retrieve_content_style__(content_start[0])
            if content_start[1] <= len(commentObj.content):
                # Content is fragmented across runs: annotate only the part
                # that lives in the located run.
                replacement = self.COMMENT_FORMATE_TEMPLATE.format(commentObj.id,
                                                                   content_style,
                                                                   commentObj.content[0:content_start[1]],
                                                                   commentObj.id,
                                                                   commentObj.id,
                                                                   content_style)
                self.__file_content__ = self.__file_content__[0:content_start[0]] + \
                                        replacement + \
                                        self.__file_content__[content_start[0]+content_start[1]:]
            else:
                # Whole content sits inside the located run.
                replacement = self.COMMENT_FORMATE_TEMPLATE.format(commentObj.id,
                                                                   content_style,
                                                                   commentObj.content,
                                                                   commentObj.id,
                                                                   commentObj.id,
                                                                   content_style)
                self.__file_content__ = self.__file_content__[0:content_start[0]] + \
                                        replacement + \
                                        self.__file_content__[content_start[0]+len(commentObj.content):]
            if self.__file_content_updated__ == False:
                self.__file_content_updated__ = True
            return True

        return False




    def writeToFile(self):
        """Write the XML text to word/document.xml if it changed, then clear
        the dirty flag."""
        if self.__file_content_updated__ == True:
            with open(WORD_DOCUMENT_XML_PATH,'w', encoding=UTF8_ENCODING) as c_f:
                c_f.write(self.__file_content__)
            self.__file_content_updated__ = False



class ContentTypesXmlFile:
    """[Content_Types].xml wrapper: registers the content type of the
    comments part when it is missing."""

    # Override entry for word/comments.xml; re-appends the closing </Types>
    # tag that the insertion slice removes.
    OVERRIDE_COMMENTS_XML = '<Override PartName="/word/comments.xml" ' \
                                      'ContentType="application/vnd.openxmlformats-officedocument.wordprocessingml.comments+xml"/>' \
                            '</Types>'
    OVERRIDE_COMMENTS_XML_LAST_CLOSING_TAG_OFFSET = -len('</Types>')


    def __init__(self, zip_file):
        """Load [Content_Types].xml and add the comments override if absent."""
        self.__file_content__ = zip_file.read(CONTENT_TYPES_XML_PATH).decode(UTF8_ENCODING)
        self.__file_content_updated__ = False
        if self.__file_content__.find(COMMENTS_XML_FILENAME) == -1:
            self.__file_content__ = self.__file_content__[:self.OVERRIDE_COMMENTS_XML_LAST_CLOSING_TAG_OFFSET] + self.OVERRIDE_COMMENTS_XML
            self.__file_content_updated__ = True


    def writeToFile(self):
        """Persist the XML if it changed, then clear the dirty flag
        (the original never cleared it, so repeated calls re-wrote the file;
        the sibling *XmlFile classes all reset the flag after writing)."""
        if self.__file_content_updated__ == True:
            with open(CONTENT_TYPES_XML_PATH,'w', encoding=UTF8_ENCODING) as c_f:
                c_f.write(self.__file_content__)
            self.__file_content_updated__ = False



class WordRelsDocumentXmlRelsFile:
    """word/_rels/document.xml.rels wrapper: adds the relationship pointing
    at comments.xml when it is missing."""

    # New Relationship entry; re-appends the </Relationships> closing tag
    # that the insertion slice removes.
    RELATIONSHIP =  '<Relationship Id="rId{}" ' \
                                  'Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/comments" ' \
                                  'Target="comments.xml"/>' \
                    '</Relationships>'
    RELATIONSHIP_LAST_CLOSING_TAG_OFFSET = -len('</Relationships>')


    def __init__(self, zip_file):
        """Load the rels part and register comments.xml if not present."""
        self.__file_content__ = zip_file.read(WORD_RELS_DOCUMENT_XML_RELS_PATH).decode(UTF8_ENCODING)
        self.__file_content_updated__ = False
        if self.__file_content__.find(COMMENTS_XML_FILENAME) == -1:
            # Next rId = count of existing Relationship ids + 1.
            # NOTE(review): assumes existing ids are contiguous rId1..rIdN;
            # a gap could produce a duplicate id -- confirm against inputs.
            repatternIDs = re.compile(r'(?<=Id="rId)\d+')     # matches '<Relationship Id="rId...'
            rIDNum = len(repatternIDs.findall(self.__file_content__))
            # (The original also evaluated RELATIONSHIP.format(rIDNum+1) here
            # and discarded the result -- dead statement, removed.)
            self.__file_content__ = self.__file_content__[:self.RELATIONSHIP_LAST_CLOSING_TAG_OFFSET] + self.RELATIONSHIP.format(rIDNum+1)
            self.__file_content_updated__ = True


    def writeToFile(self):
        """Persist the rels part if it changed, then clear the dirty flag
        (consistent with the other part wrappers; the original left the flag
        set, so repeated calls re-wrote the file)."""
        if self.__file_content_updated__ == True:
            with open(WORD_RELS_DOCUMENT_XML_RELS_PATH, 'w', encoding=UTF8_ENCODING) as c_f:
                c_f.write(self.__file_content__)
            self.__file_content_updated__ = False



class WordDocument:
    '''
    Context manager around a .docx archive for inserting Word comments.

    open() extracts the archive into the current working directory and wraps
    the four XML parts involved; close() writes any changes back, rebuilds
    the .docx in place, and removes the extracted tree.
    '''
    def __init__(self, file_path):
        self.__file_path__                          = file_path
        self.__file_content_updated__               = False
        self.__next_comment_id__                    = -1
        self.__file_opened__                        = False
        # Bare annotations: these declare the part wrappers but do NOT create
        # the attributes -- they are only assigned in open().
        self.__word_comments_xml_file__             : WordCommentsXmlFile
        self.__word_document_xml_file__             : WordDocumentXmlFile
        self.__content_types_xml_file__             : ContentTypesXmlFile
        self.__word_rels_document_xml_rel_file__    : WordRelsDocumentXmlRelsFile

    def __enter__(self):
        # Extract the archive and load the comment-related xml parts.
        self.open()
        return self


    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        if exc_type is not None:
            # Log where the exception came from (file name, line, text).
            tb_info = traceback.extract_tb(exc_tb)
            filename, line, func, text = tb_info[-1]
            log_err(f"Exception Context: {exc_type.__name__} {exc_val}")
            log_err(f"file: (unknown), line:{line}")
            log_err(f"text: {text}")
            # Print the full traceback for debugging.
            traceback.print_tb(exc_tb)

            # Suppress the exception so batch processing can continue.
            return True
        return False


    def __nextCommentId__(self):
        """Return a fresh comment id, lazily seeding from comments.xml on
        first use and incrementing afterwards."""
        if self.__file_opened__ == False:
            self.open()

        if self.__next_comment_id__ == -1:
            self.__next_comment_id__ = self.__word_comments_xml_file__.nextCommentId()
        else:
            self.__next_comment_id__ += 1

        return self.__next_comment_id__


    def __writeToFile__(self):
        """Flush all modified parts to disk and repack them into the .docx."""
        if self.__file_content_updated__ == True:
            # Save the changes into the extracted files.
            self.__content_types_xml_file__.writeToFile()
            self.__word_rels_document_xml_rel_file__.writeToFile()
            self.__word_document_xml_file__.writeToFile()
            self.__word_comments_xml_file__.writeToFile()

            # Remove the original docx file before rebuilding it.
            if os.path.exists(self.__file_path__):
                os.remove(self.__file_path__)

            # Create a new docx file and write all the extracted files.
            with ZipFile(self.__file_path__, mode='w') as new_doc_file:
                # comments.xml may be newly created and absent from the
                # original archive's name list.
                if WORD_COMMENTS_XML_PATH not in self.__file_name_list__:
                    new_doc_file.write(WORD_COMMENTS_XML_PATH)

                for file_name in self.__file_name_list__:
                    if os.path.isfile(file_name):
                        new_doc_file.write(file_name)

            self.__file_content_updated__ = False


    def __clean__(self):
        """Remove the files and directories extracted by open()."""
        # Step01: remove the extracted files.
        for file_name in self.__file_name_list__:
            if os.path.exists(file_name):
                if os.path.isfile(file_name):
                    os.remove(file_name)
                else:
                    shutil.rmtree(file_name)

        # Step02: remove the well-known extracted directories.
        if os.path.exists("_rels"):
            shutil.rmtree("_rels")
        if os.path.exists("word"):
            shutil.rmtree("word")
        if os.path.exists("docProps"):
            shutil.rmtree("docProps")
        if os.path.exists("customXml"):
            shutil.rmtree("customXml")


    def open(self, file_path = None):
        """Extract the .docx into the working directory and wrap its parts.
        Safe to call repeatedly; a no-op once opened."""
        if self.__file_opened__ == True:
            return

        if file_path is not None:
            self.__file_path__ = file_path

        with open(self.__file_path__, 'rb') as docx_file:
            with ZipFile(docx_file) as zip_file:
                # NOTE(review): extracts into the current working directory.
                zip_file.extractall()
                self.__file_name_list__ = zip_file.namelist()

                if WORD_COMMENTS_XML_PATH not in self.__file_name_list__:
                    self.__word_comments_xml_file__      = WordCommentsXmlFile()
                else:
                    self.__word_comments_xml_file__      = WordCommentsXmlFile(zip_file)

                self.__word_document_xml_file__          = WordDocumentXmlFile(zip_file)
                self.__content_types_xml_file__          = ContentTypesXmlFile(zip_file)
                self.__word_rels_document_xml_rel_file__ = WordRelsDocumentXmlRelsFile(zip_file)

        self.__file_opened__ = True

    def close(self):
        """Write back any changes and clean up the extracted tree."""
        self.__writeToFile__()
        self.__clean__()


    def insertCommentObj(self, run_id:int, commentObj:"Comment"):
        """Insert *commentObj* into comments.xml and anchor it in
        document.xml; returns the document-part result, or False when the
        comment id was already inserted."""
        # The original tested `self.__word_comments_xml_file__ is None`, but
        # that attribute is only *annotated* in __init__ (never assigned), so
        # the test raised AttributeError before open() was ever called.  Use
        # the same guard as __nextCommentId__.
        if self.__file_opened__ == False:
            self.open()

        if self.__word_comments_xml_file__.insertCommentObj(commentObj) == True:
            if self.__file_content_updated__ == False:
                self.__file_content_updated__ = True
            return self.__word_document_xml_file__.insertCommentObj(run_id, commentObj)
        return False



class DocWithFounderComments:
    """Read-only view of a Founder-proofread .docx.

    The Founder tool stores its review results as <Review> elements inside
    the document's customXml parts and marks the reviewed text with
    bookmarkStart/bookmarkEnd pairs (bkName) nested in <w:sdt> blocks.
    This class extracts both, trims each review's context down to a
    uniquely-matching snippet, and lets callers look up the reviews that
    apply to a given stretch of text.
    """
    def __init__(self, file_path:str):
        self.__file_path__      = file_path
        self.__reviews_dict__   = {}    # bkName -> Comment, filled by extract_all_reviews()
        self.__all_runs__       = []    # cache for extract_all_runs()
        self.__docx__           : docx.document.Document



    def __enter__(self):
        # Read comments related xml files
        return self.open()


    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always save first, then log — and suppress — any exception raised
        # inside the with-block.
        self.close()   
        if exc_type is not None:
            # Get traceback file name, line, etc.
            tb_info = traceback.extract_tb(exc_tb)
            filename, line, func, text = tb_info[-1]
            log_err(f"Exception Context: {exc_type.__name__} {exc_val}")
            log_err(f"file: (unknown), line:{line}")
            log_err(f"text: {text}")
            # print Exception Traceback
            traceback.print_tb(exc_tb)
            
            # Supress Exception
            return True
        return False    


    def open(self, file_path = None):
        """Load the document with python-docx; *file_path*, when given,
        overrides the path passed to __init__."""
        if file_path is not None:
            self.__file_path__ = file_path
        self.__docx__ = docx.Document(self.__file_path__)
        return self


    def close(self):
        # Save any changes if needed
        self.__docx__.save(self.__file_path__) # type: ignore


    # Sample Review Element content
    # <Review   inspectType="易错词检查" 
    #           inspectCategory="错误" 
    #           errorCategory="1" operate="1" 
    #           amend="0" amendTime="" amendContent="" amendColor="" 
    #           inspectTypeEn="errorwords" rule="" 
    #           lookup="窈窕淑女" 
    #           content="窃窕淑女" 
    #           source="" errorType="1" AllIndex="0" 
    #           context="关关睢鸠，在河之洲。" 
    #           id="1140414" 
    #           bkName="bkReivew1140414" note="0" index="10"/>
    def extract_all_reviews(self):
        """Collect every Founder <Review> record from the customXml parts.

        Returns:
            dict mapping bookmark name (bkName) -> Comment; cached after the
            first call.
        """
        if self.__reviews_dict__:
            return self.__reviews_dict__
        
        for rel in self.__docx__.part.rels:
            rel = self.__docx__.part.rels[rel]
            if "customXml" in rel.target_ref:
                customXml_part = rel.target_part
                customXml_xml_gbk = customXml_part.blob
                #unicode_str = customXml_xml_gbk.decode('gbk')  # decode to a Unicode string
                #utf8_bytes = unicode_str.encode('utf-8')  # re-encode as UTF-8 bytes
                # Parsing the XML 
                #utf_8_parser = ET.XMLParser(encoding='utf-8', recover=True)
                root = ET.fromstring(customXml_xml_gbk)         # type: ignore
                # local-name() xpath matches <Review> regardless of namespace.
                for review in root.xpath('//*[local-name()="Review"]'):   
                    bkName = review.get("bkName",'')
                    id     = review.get("id",'')
                    inspectType = review.get("inspectType",'')
                    context= review.get("context", '')
                    suggestion= review.get("lookup", '')
                    content = review.get("content", '')
                    reason = review.get("errorType", '')
                    if id != '':
                        self.__reviews_dict__[bkName] = Comment(id, 
                                                                content, 
                                                                context.replace("\xa0", " "),  #Removing Non - Breaking Spaces
                                                                suggestion, 
                                                                reason, 
                                                                inspectType, 
                                                                "方正", 
                                                                "FZ")
        
        return self.__reviews_dict__


    def __least_context__(self, content, context, local_offset, all_text):
        """Shrink *context* to the smallest window around *content* (which
        starts at *local_offset* inside *context*) that still occurs exactly
        once in *all_text*.

        NOTE(review): every `offset > 0` test here treats a match at position
        0 of all_text as "not found" (str.find returns 0 there and -1 on
        failure) — confirm whether matches at the very start of the document
        should count.
        """
        least_start = local_offset
        least_end   = local_offset+len(content)

        # Extend prefix chars       
        while least_start > 0:
            least_start -= 1
            offset = all_text.find(context[least_start:least_end])
            if offset > 0:
                if all_text.find(context[least_start:least_end], offset + least_end - least_start + 1) > 0:
                    # More than one
                    continue
                else:
                    # Only one
                    break
            else:
                # Restore Previous least match
                least_start += 1 
                break

        # Extend suffix chars
        while least_end < len(context):
            least_end += 1

            offset = all_text.find(context[least_start:least_end])
            if offset > 0:
                if all_text.find(context[least_start:least_end], offset + least_end - least_start + 1) > 0:
                    # More than one
                    continue
                else:
                    # Only one
                    break
            else:
                # Restore Previous least match
                least_end -= 1
                break
        
        return  context[least_start:least_end]


    def remove_dup_comments_with_same_context(self): 
        """Drop reviews whose (context, content) pair duplicates one that is
        already kept; only the first occurrence survives."""
        context_maps  = {}
        dup_review_keys = []
        # Group reviews by their context string.
        for reviewkey in  self.__reviews_dict__:
            contextkey = self.__reviews_dict__[reviewkey].context

            if contextkey in context_maps:
                context_maps[contextkey].append((self.__reviews_dict__[reviewkey], reviewkey))
            else:
                context_maps[contextkey] = [(self.__reviews_dict__[reviewkey],reviewkey)]
        keep = 0
        pop = 0
        # Within each context group, keep one review per distinct content.
        for contextKey in context_maps:
            content_maps = {}
            for reivewtuple in context_maps[contextKey]:
                if reivewtuple[0].content not in content_maps:
                    content_maps[reivewtuple[0].content] = 1
                    keep += 1
                else:
                    dup_review_keys.append(reivewtuple[1])
                    pop += 1

        for reviewkey in dup_review_keys:
            self.__reviews_dict__.pop(reviewkey)
       


    def concate_context_content(self, context, content):
        """Concatenate *context* and *content*, collapsing the longest overlap
        between context's tail and content's head (e.g. "abcd"+"cde" -> "abcde")."""
        max_match = len(content)
        while max_match > 0:
            if context[-max_match:] == content[0:max_match]:
                return context[0:-max_match] + content
            else:
                max_match -= 1
        return context+content
    

    def compact_comments_context(self):
        """Replace each review's context with its minimal unique snippet and
        clone reviews whose content occurs several times in the context.

        NOTE(review): `all_text.find(...) > 0` and `while offset > 0` discard
        legitimate matches at offset 0, and `offset = len(context)` below
        points one content-length past the match (len(context) - len(content)
        looks intended) — both worth confirming against real documents.
        """
        self.extract_all_reviews()
        self.extract_all_runs()

        # Full document text, used to test snippet uniqueness.
        all_text = ""
        for run in self.__all_runs__:
            all_text += run[2]

        newReviewObjects = {}
        for review_key in self.__reviews_dict__:
            content = self.__reviews_dict__[review_key].content
            context = self.__reviews_dict__[review_key].context

            multiple_found = 0

            offset = context.find(content)
            if offset < 0:
                # content not inside context: try stitching them together.
                if all_text.find(self.concate_context_content(context, content)) > 0:
                    context = self.concate_context_content(context, content)
                    offset = len(context)
                else:
                    log_err(f"concated context not found [{repr(context)}/{repr(content)}/{self.concate_context_content(context, content)}]")

            while offset > 0:
                least_context = self.__least_context__(content, context, offset, all_text)
                if multiple_found == 0:
                    # First hit updates the review in place.
                    self.__reviews_dict__[review_key].context = least_context
                else:
                    # Further hits become cloned reviews with suffixed keys.
                    newReviewObjects[f'{review_key}_{multiple_found}'] = copy.deepcopy(self.__reviews_dict__[review_key])
                    newReviewObjects[f'{review_key}_{multiple_found}'].context = least_context
                multiple_found += 1

                offset = context.find(content, offset + len(content))
                
        for newObjKey in newReviewObjects:
            self.__reviews_dict__[newObjKey] = newReviewObjects[newObjKey]


    def extract_all_runs(self):
        """
        All the runs are extracted from the docx,
        and are organized in a list like:
            [[run_id, commented_flag, run_text[, bkname]*], ... ]
            if the run_text is already commented by Doubao
                commented_flag > 0
            else
                commented_flag == 0

        sample xml format:
            <w:p w:rsidR="00C537B8" w:rsidRDefault="002A24B9">
                <w:sdt>
                    <w:sdtPr>
                        <w:alias w:val="古诗文检查,易错词检查"/><w:id w:val="10323"/>
                    </w:sdtPr>
                    <w:sdtEndPr/>
                    <w:sdtContent>
                        <w:bookmarkStart w:id="19" w:name="bkReivew10323"/>
                        <w:bookmarkStart w:id="20" w:name="bkKnowledge10323"/>
                        <w:r w:rsidR="00A13580">
                            <w:rPr>
                                <w:rFonts w:ascii="楷体" w:eastAsia="楷体" w:hAnsi="楷体" w:cs="楷体" w:hint="eastAsia"/>
                                <w:color w:val="FF0000"/>
                            </w:rPr>
                            <w:t>关关</w:t>
                        </w:r>
                        <w:r>
                            <w:rPr><w:rFonts w:hint="eastAsia"/></w:rPr>
                            <w:t>睢</w:t>
                        </w:r>
                        <w:bookmarkEnd w:id="19"/>                        
                        <w:r>
                            <w:rPr><w:rFonts w:hint="eastAsia"/></w:rPr>
                            <w:t>鸠，</w:t>
                        </w:r>

                        <w:bookmarkEnd w:id="20"/>
                    </w:sdtContent>
                </w:sdt>
                <w:r w:rsidR="00A13580">
                    <w:rPr>
                        <w:rFonts w:ascii="楷体" w:eastAsia="楷体" w:hAnsi="楷体" w:cs="楷体" w:hint="eastAsia"/>
                    </w:rPr>
                    <w:t>在河之洲。</w:t>
                </w:r>
            </w:p>
        """
        if self.__all_runs__:
            return self.__all_runs__
        
        try:
            paragraph_id = 0
            # Iterate all the paragraphs' runs           
            for paragraph in self.__docx__.paragraphs: # type: ignore
                sdtcontent_flag = False
                bknamelist = []
                run_text = ""
                for paragraph_child in paragraph._element.iter(): # type: ignore
                    if paragraph_child.tag == (ELEMENT_TAG_SDTCONTENT):
                        # Populate previous run_text
                        if len(run_text) > 0:
                            self.__all_runs__.append([paragraph_id, len(bknamelist), run_text]) 
                            run_text = ""    
                        
                        # Clean bknamelist
                        bknamelist = []           
                        continue

                    if paragraph_child.tag == (ELEMENT_TAG_BKSTART):
                        bkname = paragraph_child.get(ELEMENT_ATTR_NAME)
                        if bkname is not None and bkname.startswith("bk"):
                            
                            # Populate previous run_text
                            if len(run_text) > 0:
                                if len(bknamelist) == 0:
                                    self.__all_runs__.append([paragraph_id, len(bknamelist), run_text]) 
                                else:
                                    self.__all_runs__.append([paragraph_id, len(bknamelist), run_text, copy.deepcopy(bknamelist)])
                                run_text = ""
                            
                            #Append this bknamelist
                            bknamelist.append(bkname)
                        continue

                    if paragraph_child.tag == (ELEMENT_TAG_BKEND):
                        if len(bknamelist) > 0:
                            # Populate previous run_text
                            if len(run_text) > 0:
                                self.__all_runs__.append([paragraph_id, len(bknamelist), run_text, copy.deepcopy(bknamelist)])
                                run_text = ""
                            #Remove the first bknamelist
                            bknamelist.pop(0)
                        continue

                    if paragraph_child.tag == (ELEMENT_TAG_T):
                        run_text = run_text + paragraph_child.text.replace("\xa0", " ") # Removing Non - Breaking Spaces
                                      

                if len(run_text) > 0:
                    # Populate left run_text
                    self.__all_runs__.append([paragraph_id, len(bknamelist), run_text]) 
                    run_text = ""

                paragraph_id += 1
        except Exception as e:
            log_err(f"Parsing Error: {e}")

        return self.__all_runs__


    # Slack (in characters) on both sides when locating a text span's runs.
    SMART_OFFSET_DELTA = 100
    
    def getTextCommentObjs(self, uncommented_text, text_offset):
        """Return the reviews (bkName -> Comment) whose context appears in
        *uncommented_text*, which starts at *text_offset* in the whole
        document; only runs near that offset (+/- SMART_OFFSET_DELTA) are
        examined."""
        commentObjs = {}
        run_text_offset = 0
        for run in self.extract_all_runs():
            # Skip the leading runs
            if (run_text_offset + len(run[2])) < (text_offset-self.SMART_OFFSET_DELTA):
                run_text_offset += len(run[2])    
                continue

            # Append this run's comment objs
            if run[1] > 0:       
                for bkname in run[3]:
                    if bkname in self.extract_all_reviews():
                        if self.__reviews_dict__[bkname].context in uncommented_text:
                            commentObjs[bkname] = self.__reviews_dict__[bkname]

            # Check whether a run text already ahead the uncommented text
            run_text_offset += len(run[2])
            if run_text_offset > (text_offset + len(uncommented_text) + self.SMART_OFFSET_DELTA):
                break

        return commentObjs
   


def get_comment_from_openai(text):
    """Placeholder for LLM-based review generation.

    Intended to ask the configured OpenAI-compatible endpoint for review
    comments on *text* (see the commented-out call in
    DocWithDoubaoComments.merge_fourder_comments); currently it only logs
    the invocation and returns None.
    """
    log_info("get_comment_from_openai")
    return None



class DocWithDoubaoComments:
    '''
    Wrapper around a Doubao-annotated .docx that merges Founder review
    comments into it as native Word comments.

    Object attributes:
        self.__file_path__          : path of the .docx being modified
        self.__all_runs__           : cache of [run_id, commented_flag, run_text]
        self.__merged_runs__        : __all_runs__ with adjacent uncommented runs merged
        self.__new_comments_count__ : comments inserted so far
        self.__docx__               : python-docx Document, set by open()
        self.__word_doc__           : raw-XML WordDocument helper, set by open()
    '''
    def __init__(self, file_path):
        self.__file_path__          = file_path
        self.__all_runs__           = []
        self.__merged_runs__        = []
        self.__new_comments_count__ = 0   # Two more <w:r> will be added per one new comment
        # Populated by open(); initialized here so close() is safe even when
        # open() never ran or failed (close() is always called from __exit__).
        self.__docx__               = None
        self.__word_doc__           = None


    def __enter__(self):
        """Context-manager entry: load the document parts."""
        return self.open()


    def __exit__(self, exc_type, exc_val, exc_tb):
        """Always close (saving changes), then log — and suppress — any
        exception raised inside the with-block."""
        self.close()
        if exc_type is not None:
            # Get traceback file name, line, etc.
            tb_info = traceback.extract_tb(exc_tb)
            filename, line, func, text = tb_info[-1]
            log_err(f"Exception Context: {exc_type.__name__} {exc_val}")
            log_err(f"file: (unknown), line:{line}")
            log_err(f"text: {text}")
            # print Exception Traceback
            traceback.print_tb(exc_tb)

            # Suppress Exception
            return True
        return False


    def open(self, file_path = None):
        """Load the document both via python-docx (for run extraction) and
        via the WordDocument raw-XML helper (for comment insertion)."""
        if file_path is not None:
            self.__file_path__ = file_path

        self.__docx__          = docx.Document(self.__file_path__)
        self.__word_doc__      = WordDocument(self.__file_path__)
        self.__word_doc__.open()
        return self


    def close(self):
        """Write any inserted comments back to disk."""
        # Guard: __exit__ calls close() even when open() never succeeded.
        if self.__word_doc__ is not None:
            self.__word_doc__.close()


    def merge_fourder_comments(self, founder_docx_path):
        """Copy the Founder review comments found in *founder_docx_path*
        into this document's not-yet-commented text spans.

        (Method name kept for backward compatibility — "fourder" is a
        historical typo for "founder".)
        """
        with DocWithFounderComments(founder_docx_path) as founder_docx:
            founder_docx.compact_comments_context()
            founder_docx.remove_dup_comments_with_same_context()
            run_text_offset = 0
            for run in self.get_merged_runs():
                if not run[1]:  # only text Doubao has not commented yet
                    commentObjs = founder_docx.getTextCommentObjs(run[2], run_text_offset)
                    # Possible fallback when Founder has nothing for this span:
                    #if not commentObjs:
                    #    commentObjs = get_comment_from_openai(run[2])

                    if commentObjs:
                        self.insertCommentObjs(int(run[0]), commentObjs)
                run_text_offset += len(run[2])


    def get_merged_runs(self):
        """
        Compared to self.__all_runs__,
        __merged_runs__ is the result of merging adjacent uncommented runs
        into one run.  Cached after the first call.
        """
        if not self.__merged_runs__:
            if not self.extract_all_runs():
                return []

            self.__merged_runs__.append(copy.deepcopy(self.__all_runs__[0]))
            for run in self.__all_runs__[1:]:
                last = self.__merged_runs__[-1]
                if not last[1] and not run[1]:
                    # Both uncommented: fold this run's text into the last one.
                    last[2] = last[2] + run[2]
                else:
                    self.__merged_runs__.append(run)

        return self.__merged_runs__


    def extract_all_runs(self):
        """
        All the runs are extracted from the docx,
        and are organized in a list like:
            [[run_id, commented_flag, run_text], ... ]
            if the run_text is already commented by Doubao
                commented_flag == True
            else
                commented_flag == False
        """
        if self.__all_runs__:
            return self.__all_runs__

        try:
            run_id = -1
            run_text = ""
            # Iterate all the paragraphs' runs
            for paragraph in self.__docx__.paragraphs:
                commented_flag = False
                for child in paragraph._element.iter(): # type: ignore
                    if 'commentRangeStart' in child.tag:
                        # Flush text collected before the comment range opens.
                        if len(run_text) > 0:
                            self.__all_runs__.append([run_id, commented_flag, run_text])
                            run_text = ""
                        commented_flag = True
                        continue
                    if 'commentRangeEnd' in child.tag:
                        # Flush the commented text.
                        if len(run_text) > 0:
                            self.__all_runs__.append([run_id, commented_flag, run_text])
                            run_text = ""

                        commented_flag = False
                        continue
                    if child.tag == (ELEMENT_TAG_R):
                        # New <w:r>: flush pending text and bump the run id.
                        if len(run_text) > 0:
                            self.__all_runs__.append([run_id, commented_flag, run_text])
                            run_text = ""
                        run_id += 1

                    if child.tag == (ELEMENT_TAG_T):
                        run_text += child.text.replace("\xa0", " ")  # Removing Non-Breaking Spaces

                if len(run_text) > 0:
                    self.__all_runs__.append([run_id, commented_flag, run_text])
                    run_text = ""

        except Exception as e:
            log_err(f"Parsing Error: {e}")

        # Fix: guard the diagnostic log — indexing [-1] raised IndexError when
        # no runs were extracted (e.g. parsing failed early).
        if self.__all_runs__:
            log_info(f'last run = {self.__all_runs__[-1]}, {run_id}')
        return self.__all_runs__


    def insertCommentObjs(self, run_id, commentObjs):
        """Insert each distinct comment in *commentObjs* at *run_id*,
        shifting by the <w:r> elements added by earlier insertions.

        Returns the number of comments successfully inserted.
        """
        comment_id = 0
        success = 0
        for commentObjKey in commentObjs:
            obj = commentObjs[commentObjKey]
            if comment_id == obj.id:
                # Consecutive duplicate id — already inserted, skip.
                continue
            comment_id = obj.id
            # Every prior comment added two <w:r> elements before this run.
            if self.__word_doc__.insertCommentObj(run_id + self.__new_comments_count__*2, obj) == True:
                success += 1
        self.__new_comments_count__ += success

        return success


# Test Cases for this module
# Test Cases for this module
def test_convert_one_bookmark_to_comment(file_path):
    """Smoke test: extract every Founder review from *file_path* and insert
    each one as a comment anchored at run 0 of the same document."""
    with DocWithFounderComments(file_path) as founder_doc:
        extracted = founder_doc.extract_all_reviews()

    with WordDocument(file_path) as word_doc:
        for key in extracted:
            word_doc.insertCommentObj(0, extracted[key])



def test_extract_all_founder_reviews():
    """Smoke test: dump every Founder review of the docx named on the
    command line to the log."""
    path = sys.argv[1]

    with DocWithFounderComments(path) as founder_doc:
        all_reviews = founder_doc.extract_all_reviews()
        for key in all_reviews:
            log_info(all_reviews[key].format())



def test_remove_dup_comments_with_same_context():
    """Smoke test: run context compaction and de-duplication on founder.docx."""
    log_info("DocWithFounderComments.extract_all_runs()")
    doc_path = "founder.docx"
    with DocWithFounderComments(doc_path) as founder_doc:
        founder_doc.compact_comments_context()
        founder_doc.remove_dup_comments_with_same_context()




def print_runs(all_runs):
    """Log every extracted run entry, one log record per run."""
    for entry in all_runs:
        log_info(entry)




def handle_College_Chinese_docx():
    """Batch driver: for each (AI, Founder, QPM) triple, copy the AI docx to
    the QPM path and merge the Founder review comments into the copy.

    Currently only the third triple (slice [2:3]) is processed.
    """
    docx_filepath_tuples = [("documents/项目一  古诗歌鉴赏 (AI).docx", 
                             "documents/项目一  古诗歌鉴赏-方正.docx", 
                             "documents/项目一  古诗歌鉴赏 (QPM).docx"),
                            ("documents/项目二  文言文阅读（AI）.docx", 
                             "documents/项目二  文言文阅读-方正.docx", 
                             "documents/项目二  文言文阅读（QPM）.docx"),                             
                            ("documents/项目三  现代文阅读（AI）.docx", 
                             "documents/项目三  现代文阅读-方正.docx", 
                             "documents/项目三  现代文阅读（QPM）.docx"),
                            ("documents/项目四  应用文写作 (AI).docx", 
                             "documents/项目四  应用文写作-方正.docx", 
                             "documents/项目四  应用文写作 (QPM).docx")]    

    for docx_filepath_tuple in docx_filepath_tuples[2:3]:
        log_info(docx_filepath_tuple)
        ai_path, founder_path, output_path = docx_filepath_tuple

        # Start from a fresh copy of the AI document.
        if os.path.exists(output_path):
            os.remove(output_path)
        shutil.copy2(ai_path, output_path)

        with DocWithDoubaoComments(output_path) as doubao_doc:
            doubao_doc.merge_fourder_comments(founder_path)

def handle_one_docx_file():
    """Single-file driver: restore doubao.docx from its backup, then merge
    the Founder comments from founder.docx into it."""
    founder_path = "founder.docx"
    target_path  = "doubao.docx"
    backup_path  = "doubao_back.docx"

    # Rebuild the working copy from the pristine backup.
    if os.path.exists(target_path):
        os.remove(target_path)
    shutil.copy2(backup_path, target_path)

    with DocWithDoubaoComments(target_path) as doubao_doc:
        doubao_doc.merge_fourder_comments(founder_path)


def log_debug(msg):
    """Emit *msg* at DEBUG level through this module's logger."""
    module_logger = logging.getLogger(__name__)
    module_logger.debug(msg)


def log_info(msg):
    """Emit *msg* at INFO level through this module's logger."""
    module_logger = logging.getLogger(__name__)
    module_logger.info(msg)


def log_err(msg):
    """Emit *msg* at ERROR level through this module's logger."""
    module_logger = logging.getLogger(__name__)
    module_logger.error(msg)

if __name__ == '__main__':

    # Begin each session with a fresh log file.
    if os.path.exists('app.log'):
        os.remove('app.log')

    # Module logger captures everything; the file handler filters to INFO+.
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)

    handler = logging.FileHandler('app.log', encoding='utf-8')
    handler.setLevel(logging.INFO)
    handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
    logger.addHandler(handler)

    handle_College_Chinese_docx()
    #test_remove_dup_comments_with_same_context()





