#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
from difflib import SequenceMatcher
from docx import Document
from docx.text.paragraph import Paragraph
from docx.table import Table
from tqdm import tqdm

def num_to_chinese(n):
    """Convert a non-negative integer to Chinese numerals.

    Digits are grouped in myriads (万/亿), so values >= 100000 render
    correctly (e.g. 100000 -> '十万'); the previous position-indexed unit
    lookup mis-rendered that range. Output is unchanged for 0-99999.

    Args:
        n: non-negative integer.
    Returns:
        The Chinese numeral string, e.g. 21 -> '二十一', 1005 -> '一千零五'.
    Raises:
        ValueError: if n is negative.
    """
    if n < 0:
        raise ValueError("n must be non-negative")
    if n == 0:
        return '零'
    nums = '零一二三四五六七八九'

    def _group(m):
        """Render 1..9999 with 十/百/千 units, collapsing runs of zeros."""
        units = ('', '十', '百', '千')
        s = str(m)
        parts = []
        for i, ch in enumerate(s):
            d = int(ch)
            if d == 0:
                # Emit at most one 零 for any run of interior zeros.
                if parts and parts[-1] != '零':
                    parts.append('零')
            else:
                parts.append(nums[d] + units[len(s) - i - 1])
        return ''.join(parts).rstrip('零')

    big_units = ('', '万', '亿', '万亿')
    groups = []  # base-10000 digits, least significant first
    while n:
        groups.append(n % 10000)
        n //= 10000

    pieces = []
    top = len(groups) - 1
    for idx in range(top, -1, -1):
        g = groups[idx]
        if g == 0:
            continue
        if pieces and g < 1000:
            # A skipped or short group leaves a positional gap -> single 零.
            pieces.append('零')
        text = _group(g)
        if idx == top and 10 <= g < 20:
            text = text[1:]  # leading teens: '一十X' -> '十X'
        pieces.append(text + big_units[idx])
    return ''.join(pieces)

def get_document_elements(doc):
    """Walk the document body and wrap each child element.

    Returns a list of ('paragraph', Paragraph) / ('table', Table) tuples in
    the order the elements appear in the body XML; other element types are
    ignored.
    """
    W = '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}'
    factories = {
        W + 'p': ('paragraph', Paragraph),
        W + 'tbl': ('table', Table),
    }
    result = []
    for node in doc._element.body:
        entry = factories.get(node.tag)
        if entry is not None:
            kind, wrapper = entry
            result.append((kind, wrapper(node, doc)))
    return result

def extract_blocks_with_style_sections(docx_path):
    """Extract text blocks from a .docx file, tagging each with its section.

    Headings are detected via paragraph styles ("Heading 1".."Heading 9").
    Level-1 headings become Chinese chapter titles ("第X章 ..."); deeper
    levels get dotted numeric paths ("1.2 ..."). Tables are flattened into
    one text block per table. (Removed: an unused `updated` flag and a dead
    string-literal debug block that referenced it.)

    Args:
        docx_path: path to the .docx file.
    Returns:
        List of {'text': str, 'section': str} dicts in document order.
    """
    doc = Document(docx_path)
    blocks = []
    current_section = "文档根"
    levels = [0] * 9    # counters for heading levels 1..9
    chapter_count = 0   # independent counter for level-1 chapters

    ns = '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}'
    for elem_type, elem in get_document_elements(doc):
        if elem_type == 'paragraph':
            text = elem.text.strip()
            if not text:
                continue

            # Skip paragraphs that live inside table cells; their content is
            # captured by the table branch below.
            if elem._p.getparent().tag == ns + 'tc':
                continue

            # Heading style drives the current section label.
            style_name = elem.style.name
            if style_name.startswith('Heading '):
                try:
                    level = int(style_name.split(' ')[1])  # "Heading 1" -> 1
                    if 1 <= level <= 9:
                        # Reset the counters of all deeper levels.
                        for i in range(level, 9):
                            levels[i] = 0

                        if level == 1:
                            # Every level-1 heading starts a new '第X章'.
                            chapter_count += 1
                            levels[0] = chapter_count
                            current_section = f"第{num_to_chinese(chapter_count)}章 {text}"
                        else:
                            # Sub-level: bump its counter, join as "1.2.3".
                            levels[level - 1] += 1
                            numbering = '.'.join(
                                str(levels[i]) for i in range(level) if levels[i] > 0
                            )
                            current_section = f"{numbering} {text}"
                except ValueError:
                    pass  # non-standard "Heading X" style; treat as body text

            blocks.append({
                'text': text,
                'section': current_section
            })
        elif elem_type == 'table':
            table_text = []
            for i, row in enumerate(elem.rows):
                row_text = " | ".join(
                    cell.text.strip() for cell in row.cells if cell.text.strip()
                )
                if row_text:
                    table_text.append(f"行{i+1}: {row_text}")
            if table_text:
                blocks.append({
                    'text': "【表格】\n" + "\n".join(table_text),
                    'section': current_section
                })

    return blocks

def find_similar_blocks(blocks1, blocks2, threshold=0.60, min_length=15):
    """Pairwise-compare text blocks and report likely duplicated content.

    For each block in ``blocks1`` the most similar block in ``blocks2`` is
    found; pairs at or above ``threshold`` are reported. Similarity is the
    max of difflib's overall ratio and a containment score (longest common
    substring over the shorter text), so both edited paragraphs and
    copy-embedded fragments are caught.

    Args:
        blocks1, blocks2: lists of {'text', 'section'} dicts.
        threshold: minimum similarity (0..1) to report.
        min_length: blocks shorter than this are skipped.
    Returns:
        List of risk dicts with sections, full contents, similarity and the
        duplicated fragment.
    """
    risks = []
    total = len(blocks1)

    with tqdm(total=total, desc="🔍 检测雷同内容", unit="块") as pbar:
        for block1 in blocks1:
            text1 = block1['text']
            if len(text1) < min_length:
                pbar.update(1)
                continue

            best_sim = 0
            best_block2 = None
            best_match = None  # kept so we don't recompute the match later
            for block2 in blocks2:
                text2 = block2['text']
                if len(text2) < min_length:
                    continue

                matcher = SequenceMatcher(None, text1, text2)

                # 1. Overall similarity (good for lightly edited paragraphs).
                ratio_sim = matcher.ratio()

                # 2. Containment similarity: longest common substring over
                #    the shorter text (good for a-contains-b relations).
                match = matcher.find_longest_match(0, len(text1), 0, len(text2))
                min_len = min(len(text1), len(text2))
                contain_sim = match.size / min_len if min_len > 0 else 0

                # Final score is the stronger of the two signals.
                sim = max(ratio_sim, contain_sim)

                if sim > best_sim:
                    best_sim = sim
                    best_block2 = block2
                    best_match = match

            if best_sim >= threshold and best_block2:
                # Reuse the match found above instead of rebuilding a
                # SequenceMatcher for the winning pair (same result, no
                # redundant O(n*m) pass).
                if best_match.size > 0:
                    duplicate_content = text1[best_match.a: best_match.a + best_match.size]
                else:
                    duplicate_content = text1 if len(text1) < len(best_block2['text']) else best_block2['text']

                risks.append({
                    'file1_section': block1['section'],
                    'file1_content': text1,
                    'file2_section': best_block2['section'],
                    'file2_content': best_block2['text'],
                    'similarity': best_sim,
                    'duplicate_content': duplicate_content
                })
            pbar.update(1)

    return risks

def main():
    """Interactive entry point: prompt for two files, parse, compare, report."""
    print("============ 文件雷同检测 ============\n")

    # Strip whitespace and surrounding quotes (drag-and-drop paths).
    paths = [
        input("文件1路径: ").strip().strip('"'),
        input("文件2路径: ").strip().strip('"'),
    ]

    if not all(os.path.exists(p) for p in paths):
        print("❌ 文件不存在！")
        return

    names = [os.path.basename(p) for p in paths]

    print(f"\n📥 正在解析 {names[0]} ...")
    blocks_a = extract_blocks_with_style_sections(paths[0])

    print(f"📥 正在解析 {names[1]} ...")
    blocks_b = extract_blocks_with_style_sections(paths[1])

    print(f"\n📊 {names[0]}: {len(blocks_a)} 块 | {names[1]}: {len(blocks_b)} 块")

    if not (blocks_a and blocks_b):
        print("⚠️ 未提取到有效内容")
        return

    # Detection parameters.
    threshold, min_length = 0.60, 15

    print("\n⏳ 开始深度比对...\n")
    findings = find_similar_blocks(blocks_a, blocks_b,
                                   threshold=threshold, min_length=min_length)

    print("\n" + "=" * 120)
    if findings:
        print(f"🚨 发现 {len(findings)} 处实质性雷同！\n")
        for idx, item in enumerate(findings, 1):
            print(f"【重复项 {idx}】相似度: {item['similarity']:.2%}")
            print(f"{names[0]}章节：{item['file1_section']}")
            print(f"{names[0]}内容：{item['file1_content']}")
            print("\n")
            print(f"{names[1]}章节：{item['file2_section']}")
            print(f"{names[1]}内容：{item['file2_content']}")
            print("\n")
            print(f"重复内容：{item['duplicate_content']}")
            print("=" * 120)
    else:
        print(f"✅ 未发现实质性雷同内容（阈值：{threshold:.0%}）")
    print("=" * 120)

if __name__ == "__main__":
    try:
        main()
    # NOTE(review): a missing python-docx/tqdm fails at the top-of-file
    # imports, *before* this guard runs, so this handler only catches
    # ImportErrors raised lazily inside main() — the install hint never
    # shows for the common case. Consider moving the imports into main()
    # or wrapping them at the top of the file.
    except ImportError:
        print("❌ 请运行：pip install python-docx tqdm")
    except Exception as e:
        # Top-level boundary: print the error plus a full traceback.
        print(f"\n💥 错误: {e}")
        import traceback
        traceback.print_exc()
    # Keep the console window open when launched by double-click.
    input("\n✅ 按回车退出...")